From 4c8c3c7f70a6779d30f5492acbc9978f4636fe7a Mon Sep 17 00:00:00 2001
From: Valentin Schneider
Date: Tue, 7 Mar 2023 14:35:56 +0000
Subject: treewide: Trace IPIs sent via smp_send_reschedule()

To be able to trace invocations of smp_send_reschedule(), rename the
arch-specific definitions of it to arch_smp_send_reschedule() and wrap
it into an smp_send_reschedule() that contains a tracepoint.

Changes to include the declaration of the tracepoint were driven by the
following coccinelle script:

  @func_use@
  @@
  smp_send_reschedule(...);

  @include@
  @@
  #include <trace/events/ipi.h>

  @no_include depends on func_use && !include@
  @@
  #include <...>
  +
  + #include <trace/events/ipi.h>

[csky bits]
[riscv bits]
Signed-off-by: Valentin Schneider
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Guo Ren
Acked-by: Palmer Dabbelt
Link: https://lore.kernel.org/r/20230307143558.294354-6-vschneid@redhat.com
---
 include/linux/smp.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'include/linux/smp.h')

diff --git a/include/linux/smp.h b/include/linux/smp.h
index a80ab58ae3f1..c036a2228d8d 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -125,8 +125,15 @@ extern void smp_send_stop(void);
 /*
  * sends a 'reschedule' event to another CPU:
  */
-extern void smp_send_reschedule(int cpu);
-
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline so can't be passed as callback reason, but the
+ * callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({                               \
+        trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, NULL);  \
+        arch_smp_send_reschedule(cpu);                            \
+})
 
 /*
  * Prepare machine for booting other CPUs.
-- 
cgit v1.2.3-70-g09d2


From 68e2d17c9eb311ab59aeb6d0c38aad8985fa2596 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 22 Mar 2023 11:28:36 +0100
Subject: trace: Add trace_ipi_send_cpu()

Because copying cpumasks around when targeting a single CPU is a bit
daft...

Tested-and-reviewed-by: Valentin Schneider
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20230322103004.GA571242%40hirez.programming.kicks-ass.net
---
 include/linux/smp.h        |  6 +++---
 include/trace/events/ipi.h | 22 ++++++++++++++++++++++
 kernel/irq_work.c          |  6 ++----
 kernel/sched/core.c        |  1 +
 kernel/smp.c               |  4 ++--
 5 files changed, 30 insertions(+), 9 deletions(-)

(limited to 'include/linux/smp.h')

diff --git a/include/linux/smp.h b/include/linux/smp.h
index c036a2228d8d..ed8f344ba627 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -130,9 +130,9 @@ extern void arch_smp_send_reschedule(int cpu);
  * scheduler_ipi() is inline so can't be passed as callback reason, but the
  * callsite IP should be sufficient for root-causing IPIs sent from here.
  */
-#define smp_send_reschedule(cpu) ({                               \
-        trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, NULL);  \
-        arch_smp_send_reschedule(cpu);                            \
+#define smp_send_reschedule(cpu) ({                               \
+        trace_ipi_send_cpu(cpu, _RET_IP_, NULL);                  \
+        arch_smp_send_reschedule(cpu);                            \
 })
 
 /*
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index b1125dc27682..3de9bfc982ce 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -35,6 +35,28 @@ TRACE_EVENT(ipi_raise,
         TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
 );
 
+TRACE_EVENT(ipi_send_cpu,
+
+        TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+        TP_ARGS(cpu, callsite, callback),
+
+        TP_STRUCT__entry(
+                __field(unsigned int, cpu)
+                __field(void *, callsite)
+                __field(void *, callback)
+        ),
+
+        TP_fast_assign(
+                __entry->cpu = cpu;
+                __entry->callsite = (void *)callsite;
+                __entry->callback = callback;
+        ),
+
+        TP_printk("cpu=%u callsite=%pS callback=%pS",
+                  __entry->cpu, __entry->callsite, __entry->callback)
+);
+
 TRACE_EVENT(ipi_send_cpumask,
 
         TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index c33e88e32a67..2f4fb336dda1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -78,10 +78,8 @@ void __weak arch_irq_work_raise(void)
 
 static __always_inline void irq_work_raise(struct irq_work *work)
 {
-        if (trace_ipi_send_cpumask_enabled() && arch_irq_work_has_interrupt())
-                trace_ipi_send_cpumask(cpumask_of(smp_processor_id()),
-                                       _RET_IP_,
-                                       work->func);
+        if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+                trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
 
         arch_irq_work_raise();
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b0a48cfc0a22..ad40755ddc11 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -96,6 +96,7 @@
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
 
 /*
diff --git a/kernel/smp.c b/kernel/smp.c
index 37e9613a0889..43f0796ecdb2 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -107,7 +107,7 @@ static __always_inline void
 send_call_function_single_ipi(int cpu, smp_call_func_t func)
 {
         if (call_function_single_prep_ipi(cpu)) {
-                trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, func);
+                trace_ipi_send_cpu(cpu, _RET_IP_, func);
                 arch_send_call_function_single_ipi(cpu);
         }
 }
@@ -346,7 +346,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
          * even if we haven't sent the smp_call IPI yet (e.g. the stopper
          * executes migration_cpu_stop() on the remote CPU).
          */
-        if (trace_ipi_send_cpumask_enabled()) {
+        if (trace_ipi_send_cpu_enabled()) {
                 call_single_data_t *csd;
                 smp_call_func_t func;
 
-- 
cgit v1.2.3-70-g09d2


From 7412a60decec2a6744cf773280ff17a0f89e8395 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 12 Apr 2023 16:49:35 -0700
Subject: cpu: Mark panic_smp_self_stop() __noreturn

In preparation for improving objtool's handling of weak noreturn
functions, mark panic_smp_self_stop() __noreturn.

Signed-off-by: Josh Poimboeuf
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/92d76ab5c8bf660f04fdcd3da1084519212de248.1681342859.git.jpoimboe@kernel.org
---
 arch/arm/kernel/smp.c          | 2 +-
 arch/arm64/include/asm/smp.h   | 1 -
 arch/arm64/kernel/smp.c        | 2 +-
 arch/powerpc/kernel/setup_64.c | 2 +-
 include/linux/smp.h            | 2 +-
 kernel/panic.c                 | 2 +-
 tools/objtool/check.c          | 1 +
 7 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'include/linux/smp.h')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d6be4507d22d..f4a4ac028b6b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -779,7 +779,7 @@ void smp_send_stop(void)
  * kdump fails. So split out the panic_smp_self_stop() and add
  * set_cpu_online(smp_processor_id(), false).
  */
-void panic_smp_self_stop(void)
+void __noreturn panic_smp_self_stop(void)
 {
         pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
                  smp_processor_id());
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 07f4ea1490f4..f2d26235bfb4 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -143,7 +143,6 @@ bool cpus_are_stuck_in_kernel(void);
 
 extern void crash_smp_send_stop(void);
 extern bool smp_crash_stop_failed(void);
-extern void panic_smp_self_stop(void);
 
 #endif /* ifndef __ASSEMBLY__ */
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 07d156fddb5f..05fe797e4203 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -830,7 +830,7 @@ static void __noreturn local_cpu_stop(void)
  * that cpu_online_mask gets correctly updated and smp_send_stop() can skip
  * CPUs that have already stopped themselves.
  */
-void panic_smp_self_stop(void)
+void __noreturn panic_smp_self_stop(void)
 {
         local_cpu_stop();
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index b2e0d3ce4261..246201d0d879 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -480,7 +480,7 @@ void early_setup_secondary(void)
 
 #endif /* CONFIG_SMP */
 
-void panic_smp_self_stop(void)
+void __noreturn panic_smp_self_stop(void)
 {
         hard_irq_disable();
         spin_begin();
diff --git a/include/linux/smp.h b/include/linux/smp.h
index a80ab58ae3f1..2a737b39cf0a 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -59,7 +59,7 @@ int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
  * Cpus stopping functions in panic. All have default weak definitions.
  * Architecture-dependent code may override them.
  */
-void panic_smp_self_stop(void);
+void __noreturn panic_smp_self_stop(void);
 void nmi_panic_self_stop(struct pt_regs *regs);
 void crash_smp_send_stop(void);
 
diff --git a/kernel/panic.c b/kernel/panic.c
index 5cfea8302d23..5e4982db8dc9 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(panic_blink);
 /*
  * Stop ourself in panic -- architecture code may override this
  */
-void __weak panic_smp_self_stop(void)
+void __weak __noreturn panic_smp_self_stop(void)
 {
         while (1)
                 cpu_relax();
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a436bc1e63d9..4b52ed6bff66 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -218,6 +218,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
                 "machine_real_restart",
                 "make_task_dead",
                 "panic",
+                "panic_smp_self_stop",
                 "rest_init",
                 "rewind_stack_and_make_dead",
                 "sev_es_terminate",
-- 
cgit v1.2.3-70-g09d2


From 27dea14c7f05106f39850a9239874cd38703b405 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Wed, 12 Apr 2023 16:49:36 -0700
Subject: cpu: Mark nmi_panic_self_stop() __noreturn

In preparation for improving objtool's handling of weak noreturn
functions, mark nmi_panic_self_stop() __noreturn.

Signed-off-by: Josh Poimboeuf
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/316fc6dfab5a8c4e024c7185484a1ee5fb0afb79.1681342859.git.jpoimboe@kernel.org
---
 arch/x86/include/asm/reboot.h | 1 -
 arch/x86/kernel/reboot.c      | 2 +-
 include/linux/smp.h           | 2 +-
 kernel/panic.c                | 2 +-
 tools/objtool/check.c         | 1 +
 5 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'include/linux/smp.h')

diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index bc5b4d788c08..9177b4354c3f 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -28,7 +28,6 @@ void __noreturn machine_real_restart(unsigned int type);
 void cpu_emergency_disable_virtualization(void);
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
-void nmi_panic_self_stop(struct pt_regs *regs);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 void run_crash_ipi_callback(struct pt_regs *regs);
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index d03c551defcc..3adbe97015c1 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -920,7 +920,7 @@ void run_crash_ipi_callback(struct pt_regs *regs)
 }
 
 /* Override the weak function in kernel/panic.c */
-void nmi_panic_self_stop(struct pt_regs *regs)
+void __noreturn nmi_panic_self_stop(struct pt_regs *regs)
 {
         while (1) {
                 /* If no CPU is preparing crash dump, we simply loop here. */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 2a737b39cf0a..7b93504eed26 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -60,7 +60,7 @@ int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
  * Architecture-dependent code may override them.
  */
 void __noreturn panic_smp_self_stop(void);
-void nmi_panic_self_stop(struct pt_regs *regs);
+void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
 void crash_smp_send_stop(void);
 
 /*
diff --git a/kernel/panic.c b/kernel/panic.c
index 5e4982db8dc9..886d2ebd0a0d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -151,7 +151,7 @@ void __weak __noreturn panic_smp_self_stop(void)
  * Stop ourselves in NMI context if another CPU has already panicked. Arch code
  * may override this to prepare for crash dumping, e.g. save regs info.
  */
-void __weak nmi_panic_self_stop(struct pt_regs *regs)
+void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs)
 {
         panic_smp_self_stop();
 }
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 4b52ed6bff66..8d073bfd7d7f 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -217,6 +217,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
                 "lbug_with_loc",
                 "machine_real_restart",
                 "make_task_dead",
+                "nmi_panic_self_stop",
                 "panic",
                 "panic_smp_self_stop",
                 "rest_init",
-- 
cgit v1.2.3-70-g09d2
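
Usage sketch (not part of the patch series above): since kernel/sched/core.c now exports the ipi_send_cpu tracepoint with EXPORT_TRACEPOINT_SYMBOL_GPL(), a GPL module can attach a probe to it through the register_trace_<name>()/unregister_trace_<name>() helpers that TRACE_EVENT() generates. The module and probe names below are purely illustrative; treat this as a minimal example of consuming the new event, not as anything the series itself adds.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative-only probe for the ipi_send_cpu tracepoint added above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <trace/events/ipi.h>

/* Probe prototype: a private data pointer first, then the TP_PROTO() args. */
static void probe_ipi_send_cpu(void *priv, const unsigned int cpu,
                               unsigned long callsite, void *callback)
{
        /*
         * trace_ipi_send_cpu() fires in IPI-send paths (wakeups, irq_work),
         * so log to the ftrace buffer rather than calling printk() here.
         */
        trace_printk("IPI to CPU%u from %pS (callback %pS)\n",
                     cpu, (void *)callsite, callback);
}

static int __init ipi_probe_init(void)
{
        return register_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
}

static void __exit ipi_probe_exit(void)
{
        unregister_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
        /* Wait for in-flight probe calls before the module text goes away. */
        tracepoint_synchronize_unregister();
}

module_init(ipi_probe_init);
module_exit(ipi_probe_exit);
MODULE_LICENSE("GPL");

Without any module, the same information is available from tracefs by enabling the ipi/ipi_send_cpu event and reading the trace buffer.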