author     Linus Torvalds <torvalds@linux-foundation.org>   2024-11-28 11:46:13 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-11-28 11:46:13 -0800
commit     7af08b57bcb9ebf78675c50069c54125c0a8b795 (patch)
tree       29d7a2ca5cc7f9f1b874b1377479cd04caeccb4b /include
parent     65ae975e97d5aab3ee9dc5ec701b12090572ed43 (diff)
parent     2bd9b57d04df417f31ef54448477c212fcdd14fc (diff)
Merge tag 'trace-v6.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull more tracing updates from Steven Rostedt:
- Add trace flag for NEED_RESCHED_LAZY
Now that NEED_RESCHED_LAZY is upstream, add it to the status bits of
the common_flags. Traces will now show when the NEED_RESCHED_LAZY flag
is set, which helps when debugging latency issues in the kernel (a small
flag-check sketch follows this list).
- Remove the "__idx" variable left over from when SRCU was removed from
the tracepoint code
- Add rcu_tasks_trace guard
To put a guard() around the tracepoint code, an rcu_tasks_trace guard
needs to be created first (see the guard sketch after the commit list
below).
- Remove __DO_TRACE() macro and just call __DO_TRACE_CALL() directly
The __DO_TRACE() macro had conditional locking depending on what was
passed into the macro parameters. As the guts of the macro have been
moved to __DO_TRACE_CALL() to handle the static call logic, there's no
reason to keep the __DO_TRACE() macro around.
It is better to just do the locking in place without the conditionals
and call __DO_TRACE_CALL() from those locations. The "cond" passed in
can also be moved out of that macro. This simplifies the code.
- Remove the "cond" from the system call tracepoint macros
The "cond" variable was added to allow some tracepoints to check a
condition within the static_branch (jump/nop) logic. The system call
tracepoints do not need this, and removing it simplifies the code.
- Replace scoped_guard() with just guard() in the tracepoint logic
guard() works just as well as scoped_guard() in the tracepoint logic,
and scoped_guard() was causing some issues there (see the sketch after
the diffstat below).
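As a quick illustration of the NEED_RESCHED_LAZY item above, the sketch below
shows the new 0x02 bit next to the existing trace_flag_type bits and a test for
it in a packed flags value. The enum values are taken from the trace_events.h
hunk further down; the helper lazy_resched_was_set() and the sample flags value
are made up for this sketch and are not kernel APIs.

/* Illustrative only: build with any C99 compiler, e.g. cc flag_sketch.c */
#include <stdbool.h>
#include <stdio.h>

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x02,	/* new in this pull */
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

/* Hypothetical helper: check the lazy-resched bit in a flags byte. */
static bool lazy_resched_was_set(unsigned int flags)
{
	return flags & TRACE_FLAG_NEED_RESCHED_LAZY;
}

int main(void)
{
	/* e.g. an event recorded with IRQs off and a lazy reschedule pending */
	unsigned int flags = TRACE_FLAG_IRQS_OFF | TRACE_FLAG_NEED_RESCHED_LAZY;

	printf("lazy resched pending: %s\n",
	       lazy_resched_was_set(flags) ? "yes" : "no");
	return 0;
}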
* tag 'trace-v6.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing: Use guard() rather than scoped_guard()
tracing: Remove cond argument from __DECLARE_TRACE_SYSCALL
tracing: Remove conditional locking from __DO_TRACE()
rcupdate_trace: Define rcu_tasks_trace lock guard
tracing: Remove __idx variable from __DO_TRACE
tracing: Move it_func[0] comment to the relevant context
tracing: Record task flag NEED_RESCHED_LAZY.
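The rcu_tasks_trace guard added by this series (the DEFINE_LOCK_GUARD_0() in
the rcupdate_trace.h hunk below) plugs rcu_read_lock_trace() and
rcu_read_unlock_trace() into the kernel's scope-based guard() machinery. Here
is a minimal userspace approximation of that mechanism, assuming a GCC/Clang
compiler with __attribute__((cleanup)); fake_read_lock(), fake_read_unlock()
and guard_fake() are stand-ins invented for this sketch, not the cleanup.h
implementation.

#include <stdio.h>

static void fake_read_lock(void)   { puts("lock");   }
static void fake_read_unlock(void) { puts("unlock"); }

/* Guard object: the constructor takes the lock, the cleanup handler drops it. */
struct fake_guard { int active; };

static struct fake_guard fake_guard_ctor(void)
{
	fake_read_lock();
	return (struct fake_guard){ .active = 1 };
}

static void fake_guard_dtor(struct fake_guard *g)
{
	if (g->active)
		fake_read_unlock();
}

/* Rough analogue of guard(rcu_tasks_trace)(): the unlock runs at end of scope. */
#define guard_fake() \
	struct fake_guard __guard __attribute__((cleanup(fake_guard_dtor))) = \
		fake_guard_ctor()

int main(void)
{
	{
		guard_fake();			/* lock taken here ...           */
		puts("critical section");
	}					/* ... and dropped automatically */
	return 0;
}

The point of DEFINE_LOCK_GUARD_0() is exactly this pairing: once the guard type
exists, call sites only name the scope, and the unlock can no longer be missed
on an early return.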
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rcupdate_trace.h   5
-rw-r--r--  include/linux/trace_events.h     1
-rw-r--r--  include/linux/tracepoint.h      82
3 files changed, 36 insertions, 52 deletions
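The last item in the summary above replaces scoped_guard() with guard(); the
sketch below contrasts the two scoping behaviours in plain userspace C. The
MY_GUARD()/MY_SCOPED_GUARD() macros and my_lock()/my_unlock() are simplified
stand-ins invented for this sketch, not the kernel's cleanup.h versions, and
the code assumes GCC/Clang extensions (__attribute__((cleanup)) and statement
expressions).

#include <stdio.h>

static void my_lock(void)   { puts("lock");   }
static void my_unlock(void) { puts("unlock"); }

struct my_guard { int active; };

static void my_guard_cleanup(struct my_guard *g)
{
	if (g->active)
		my_unlock();
}

/* guard()-style: the lock is dropped when the enclosing scope ends. */
#define MY_GUARD(name)							\
	struct my_guard name __attribute__((cleanup(my_guard_cleanup))) = \
		({ my_lock(); (struct my_guard){ .active = 1 }; })

/* scoped_guard()-style: the lock only covers the attached block. */
#define MY_SCOPED_GUARD(name)						\
	for (MY_GUARD(name), *name##_done = NULL;			\
	     !name##_done; name##_done = &name)

int main(void)
{
	MY_SCOPED_GUARD(g1) {
		puts("inside scoped guard");
	}		/* unlock for g1 runs here, as the for-scope ends */

	MY_GUARD(g2);
	puts("inside guard");
	return 0;	/* unlock for g2 runs when main() returns */
}

In the new tracepoint code the guard() sits inside an already-braced block, so
the extra statement scope that scoped_guard() introduces buys nothing there.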
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index eda493200663..e6c44eb428ab 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -10,6 +10,7 @@

 #include <linux/sched.h>
 #include <linux/rcupdate.h>
+#include <linux/cleanup.h>

 extern struct lockdep_map rcu_trace_lock_map;

@@ -98,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); }
 static inline void rcu_read_unlock_trace(void) { BUG(); }
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

+DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
+		    rcu_read_lock_trace(),
+		    rcu_read_unlock_trace())
+
 #endif /* __LINUX_RCUPDATE_TRACE_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 016b29a56c87..2a5df5b62cfc 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -184,6 +184,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF		= 0x01,
+	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x02,
 	TRACE_FLAG_NEED_RESCHED		= 0x04,
 	TRACE_FLAG_HARDIRQ		= 0x08,
 	TRACE_FLAG_SOFTIRQ		= 0x10,
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 6073a8c7e38c..76d9055b2cff 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -210,36 +210,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #endif /* CONFIG_HAVE_STATIC_CALL */

 /*
- * it_func[0] is never NULL because there is at least one element in the array
- * when the array itself is non NULL.
- *
- * With @syscall=0, the tracepoint callback array dereference is
- * protected by disabling preemption.
- * With @syscall=1, the tracepoint callback array dereference is
- * protected by Tasks Trace RCU, which allows probes to handle page
- * faults.
- */
-#define __DO_TRACE(name, args, cond, syscall)			\
-	do {							\
-		int __maybe_unused __idx = 0;			\
-								\
-		if (!(cond))					\
-			return;					\
-								\
-		if (syscall)					\
-			rcu_read_lock_trace();			\
-		else						\
-			preempt_disable_notrace();		\
-								\
-		__DO_TRACE_CALL(name, TP_ARGS(args));		\
-								\
-		if (syscall)					\
-			rcu_read_unlock_trace();		\
-		else						\
-			preempt_enable_notrace();		\
-	} while (0)
-
-/*
  * Declare an exported function that Rust code can call to trigger this
  * tracepoint. This function does not include the static branch; that is done
  * in Rust to avoid a function call when the tracepoint is disabled.
@@ -262,7 +232,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * site if it is not watching, as it will need to be active when the
  * tracepoint is enabled.
  */
-#define __DECLARE_TRACE_COMMON(name, proto, args, cond, data_proto)	\
+#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto)		\
 	extern int __traceiter_##name(data_proto);			\
 	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
 	extern struct tracepoint __tracepoint_##name;			\
@@ -297,41 +267,43 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	}

 #define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
-	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
 	static inline void __rust_do_trace_##name(proto)		\
 	{								\
-		__DO_TRACE(name,					\
-			   TP_ARGS(args),				\
-			   TP_CONDITION(cond), 0);			\
+		if (cond) {						\
+			guard(preempt_notrace)();			\
+			__DO_TRACE_CALL(name, TP_ARGS(args));		\
+		}							\
 	}								\
 	static inline void trace_##name(proto)				\
 	{								\
-		if (static_branch_unlikely(&__tracepoint_##name.key))	\
-			__DO_TRACE(name,				\
-				   TP_ARGS(args),			\
-				   TP_CONDITION(cond), 0);		\
+		if (static_branch_unlikely(&__tracepoint_##name.key)) { \
+			if (cond) {					\
+				guard(preempt_notrace)();		\
+				__DO_TRACE_CALL(name, TP_ARGS(args));	\
+			}						\
+		}							\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
 			WARN_ONCE(!rcu_is_watching(),			\
 				  "RCU not watching for tracepoint");	\
 		}							\
 	}

-#define __DECLARE_TRACE_SYSCALL(name, proto, args, cond, data_proto)	\
-	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \
+#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
 	static inline void __rust_do_trace_##name(proto)		\
 	{								\
-		__DO_TRACE(name,					\
-			   TP_ARGS(args),				\
-			   TP_CONDITION(cond), 1);			\
+		guard(rcu_tasks_trace)();				\
+		__DO_TRACE_CALL(name, TP_ARGS(args));			\
 	}								\
 	static inline void trace_##name(proto)				\
 	{								\
 		might_fault();						\
-		if (static_branch_unlikely(&__tracepoint_##name.key))	\
-			__DO_TRACE(name,				\
-				   TP_ARGS(args),			\
-				   TP_CONDITION(cond), 1);		\
-		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
+		if (static_branch_unlikely(&__tracepoint_##name.key)) { \
+			guard(rcu_tasks_trace)();			\
+			__DO_TRACE_CALL(name, TP_ARGS(args));		\
+		}							\
+		if (IS_ENABLED(CONFIG_LOCKDEP)) {			\
 			WARN_ONCE(!rcu_is_watching(),			\
 				  "RCU not watching for tracepoint");	\
 		}							\
@@ -341,6 +313,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * We have no guarantee that gcc and the linker won't up-align the tracepoint
  * structures, so we create an array of pointers that will be used for iteration
  * on the tracepoints.
+ *
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non NULL.
  */
 #define __DEFINE_TRACE_EXT(_name, _ext, proto, args)			\
 	static const char __tpstrtab_##_name[]				\
@@ -412,7 +387,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)

 #else /* !TRACEPOINTS_ENABLED */

-#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
+#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto)		\
 	static inline void trace_##name(proto)				\
 	{ }								\
 	static inline int						\
@@ -436,7 +411,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 		return false;						\
 	}

-#define __DECLARE_TRACE_SYSCALL	__DECLARE_TRACE
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))
+
+#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))

 #define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
 #define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args)
@@ -502,7 +481,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)

 #define DECLARE_TRACE_SYSCALL(name, proto, args)			\
 	__DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args),	\
-				cpu_online(raw_smp_processor_id()),	\
 				PARAMS(void *__data, proto))

 #define TRACE_EVENT_FLAGS(event, flag)