author		Linus Torvalds <torvalds@linux-foundation.org>	2022-08-02 19:12:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-02 19:12:45 -0700
commit		7d9d077c783e33995c80d8b28fea1a98161934f4 (patch)
tree		5404e84d237b764a07aaf299d175c1dcfbad9966 /include
parent		c2a24a7a036b3bd3a2e6c66730dfc777cae6540a (diff)
parent		34bc7b454dc31f75a0be7ee8ab378135523d7c51 (diff)
Merge tag 'rcu.2022.07.26a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull RCU updates from Paul McKenney:
- Documentation updates
- Miscellaneous fixes
- Callback-offload updates, perhaps most notably a new
  RCU_NOCB_CPU_DEFAULT_ALL Kconfig option that causes all CPUs to be
  offloaded at boot time, regardless of kernel boot parameters.
  This is useful for battery-powered systems such as ChromeOS and
  Android. In addition, a new RCU_NOCB_CPU_CB_BOOST Kconfig option
  prevents offloaded callbacks from interfering with real-time
  workloads and with energy-efficiency mechanisms
- Polled grace-period updates, perhaps most notably making these APIs
  account for both normal and expedited grace periods; a usage sketch
  of the polled API follows this list
- Tasks RCU updates, perhaps most notably reducing the CPU overhead of
RCU tasks trace grace periods by more than a factor of two on a
system with 15,000 tasks.
The reduction is expected to increase with the number of tasks, so it
seems reasonable to hypothesize that a system with 150,000 tasks
might see a 20-fold reduction in CPU overhead
- Torture-test updates
- Updates that merge RCU's dyntick-idle tracking into context tracking,
thus reducing the overhead of transitioning to kernel mode from
either idle or nohz_full userspace execution for kernels that track
context independently of RCU.
This is expected to be helpful primarily for kernels built with
CONFIG_NO_HZ_FULL=y
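Not part of the pull message itself: a minimal usage sketch of the polled grace-period API referenced in the bullet above. The start_poll_synchronize_rcu() and poll_state_synchronize_rcu() calls are the real API; struct my_obj and the two helpers are hypothetical names for illustration.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical structure */
	unsigned long rcu_cookie;	/* grace-period cookie */
	void *data;
};

static void my_obj_retire(struct my_obj *obj)
{
	/* Snapshot RCU state and start a grace period if one is needed. */
	obj->rcu_cookie = start_poll_synchronize_rcu();
}

static bool my_obj_try_reclaim(struct my_obj *obj)
{
	/* Non-blocking: has the grace period snapshotted above completed? */
	if (!poll_state_synchronize_rcu(obj->rcu_cookie))
		return false;		/* Not yet, retry later. */
	kfree(obj->data);		/* Safe: all pre-existing readers are done. */
	return true;
}

cond_synchronize_rcu(cookie) is the blocking counterpart, and the new *_expedited() variants added by this merge follow the same pattern.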
* tag 'rcu.2022.07.26a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (98 commits)
rcu: Add irqs-disabled indicator to expedited RCU CPU stall warnings
rcu: Diagnose extended sync_rcu_do_polled_gp() loops
rcu: Put panic_on_rcu_stall() after expedited RCU CPU stall warnings
rcutorture: Test polled expedited grace-period primitives
rcu: Add polled expedited grace-period primitives
rcutorture: Verify that polled GP API sees synchronous grace periods
rcu: Make Tiny RCU grace periods visible to polled APIs
rcu: Make polled grace-period API account for expedited grace periods
rcu: Switch polled grace-period APIs to ->gp_seq_polled
rcu/nocb: Avoid polling when my_rdp->nocb_head_rdp list is empty
rcu/nocb: Add option to opt rcuo kthreads out of RT priority
rcu: Add nocb_cb_kthread check to rcu_is_callbacks_kthread()
rcu/nocb: Add an option to offload all CPUs on boot
rcu/nocb: Fix NOCB kthreads spawn failure with rcu_nocb_rdp_deoffload() direct call
rcu/nocb: Invert rcu_state.barrier_mutex VS hotplug lock locking order
rcu/nocb: Add/del rdp to iterate from rcuog itself
rcu/tree: Add comment to describe GP-done condition in fqs loop
rcu: Initialize first_gp_fqs at declaration in rcu_gp_fqs()
rcu/kvfree: Remove useless monitor_todo flag
rcu: Cleanup RCU urgency state for offline CPU
...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/context_tracking.h	95
-rw-r--r--	include/linux/context_tracking_irq.h	21
-rw-r--r--	include/linux/context_tracking_state.h	113
-rw-r--r--	include/linux/entry-common.h	10
-rw-r--r--	include/linux/hardirq.h	12
-rw-r--r--	include/linux/rcupdate.h	45
-rw-r--r--	include/linux/rcupdate_trace.h	2
-rw-r--r--	include/linux/rcutiny.h	27
-rw-r--r--	include/linux/rcutree.h	11
-rw-r--r--	include/linux/sched.h	4
-rw-r--r--	include/linux/tracepoint.h	4
11 files changed, 245 insertions, 99 deletions
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 7a14807c9d1a..dcef4a9e4d63 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,71 +10,72 @@
 #include <asm/ptrace.h>
 
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void context_tracking_cpu_set(int cpu);
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern void ct_cpu_track_user(int cpu);
 
 /* Called with interrupts disabled. */
-extern void __context_tracking_enter(enum ctx_state state);
-extern void __context_tracking_exit(enum ctx_state state);
+extern void __ct_user_enter(enum ctx_state state);
+extern void __ct_user_exit(enum ctx_state state);
 
-extern void context_tracking_enter(enum ctx_state state);
-extern void context_tracking_exit(enum ctx_state state);
-extern void context_tracking_user_enter(void);
-extern void context_tracking_user_exit(void);
+extern void ct_user_enter(enum ctx_state state);
+extern void ct_user_exit(enum ctx_state state);
+
+extern void user_enter_callable(void);
+extern void user_exit_callable(void);
 
 static inline void user_enter(void)
 {
 	if (context_tracking_enabled())
-		context_tracking_enter(CONTEXT_USER);
+		ct_user_enter(CONTEXT_USER);
 }
 
 static inline void user_exit(void)
 {
 	if (context_tracking_enabled())
-		context_tracking_exit(CONTEXT_USER);
+		ct_user_exit(CONTEXT_USER);
 }
 
 /* Called with interrupts disabled. */
 static __always_inline void user_enter_irqoff(void)
 {
 	if (context_tracking_enabled())
-		__context_tracking_enter(CONTEXT_USER);
+		__ct_user_enter(CONTEXT_USER);
 }
 
 static __always_inline void user_exit_irqoff(void)
 {
 	if (context_tracking_enabled())
-		__context_tracking_exit(CONTEXT_USER);
+		__ct_user_exit(CONTEXT_USER);
 }
 
 static inline enum ctx_state exception_enter(void)
 {
 	enum ctx_state prev_ctx;
 
-	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ||
+	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
 	    !context_tracking_enabled())
 		return 0;
 
-	prev_ctx = this_cpu_read(context_tracking.state);
+	prev_ctx = __ct_state();
 	if (prev_ctx != CONTEXT_KERNEL)
-		context_tracking_exit(prev_ctx);
+		ct_user_exit(prev_ctx);
 
 	return prev_ctx;
 }
 
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) &&
+	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
 	    context_tracking_enabled()) {
 		if (prev_ctx != CONTEXT_KERNEL)
-			context_tracking_enter(prev_ctx);
+			ct_user_enter(prev_ctx);
 	}
 }
 
 static __always_inline bool context_tracking_guest_enter(void)
 {
 	if (context_tracking_enabled())
-		__context_tracking_enter(CONTEXT_GUEST);
+		__ct_user_enter(CONTEXT_GUEST);
 
 	return context_tracking_enabled_this_cpu();
 }
@@ -82,40 +83,56 @@ static __always_inline bool context_tracking_guest_enter(void)
 static __always_inline void context_tracking_guest_exit(void)
 {
 	if (context_tracking_enabled())
-		__context_tracking_exit(CONTEXT_GUEST);
+		__ct_user_exit(CONTEXT_GUEST);
 }
 
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static __always_inline enum ctx_state ct_state(void)
-{
-	return context_tracking_enabled() ?
-		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
-}
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
 #else
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
 static inline void user_enter_irqoff(void) { }
 static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
+static inline int ct_state(void) { return -1; }
 static __always_inline bool context_tracking_guest_enter(void) { return false; }
 static inline void context_tracking_guest_exit(void) { }
+#define CT_WARN_ON(cond) do { } while (0)
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */
 
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
-
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
 extern void context_tracking_init(void);
 #else
 static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
+
+/*
+ * Is the current CPU in an extended quiescent state?
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
+{
+	return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+}
+
+/*
+ * Increment the current CPU's context_tracking structure's ->state field
+ * with ordering. Return the new value.
+ */
+static __always_inline unsigned long ct_state_inc(int incby)
+{
+	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+}
+
+#else
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
 
 #endif
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 000000000000..c50b5670c4a5
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static inline void ct_irq_enter(void) { }
+static inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static inline void ct_nmi_enter(void) { }
+static inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
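The CT_WARN_ON() macro that moves inside the CONFIG_CONTEXT_TRACKING_USER block above is a debug assertion that fires only when context tracking is enabled. A hedged sketch of how an entry path might combine it with the renamed helpers; my_enter_from_user_mode() is hypothetical, the macros and helpers are from the hunk above.

/*
 * Hypothetical entry-path helper. Assumes interrupts are disabled,
 * as user_exit_irqoff() requires.
 */
static void my_enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);	/* warns only if tracking is enabled */
	user_exit_irqoff();			/* record the user -> kernel transition */
}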
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index ae1e63e26947..4a4d56f77180 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -4,8 +4,28 @@
 
 #include <linux/percpu.h>
 #include <linux/static_key.h>
+#include <linux/context_tracking_irq.h>
+
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)
+
+enum ctx_state {
+	CONTEXT_DISABLED	= -1,	/* returned by ct_state() if unknown */
+	CONTEXT_KERNEL		= 0,
+	CONTEXT_IDLE		= 1,
+	CONTEXT_USER		= 2,
+	CONTEXT_GUEST		= 3,
+	CONTEXT_MAX		= 4,
+};
+
+/* Even value for idle, else odd. */
+#define RCU_DYNTICKS_IDX CONTEXT_MAX
+
+#define CT_STATE_MASK (CONTEXT_MAX - 1)
+#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
 
 struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
 	/*
 	 * When active is false, probes are unset in order
 	 * to minimize overhead: TIF flags are cleared
@@ -14,18 +34,73 @@ struct context_tracking {
 	 */
 	bool active;
 	int recursion;
-	enum ctx_state {
-		CONTEXT_DISABLED = -1,	/* returned by ct_state() if unknown */
-		CONTEXT_KERNEL = 0,
-		CONTEXT_USER,
-		CONTEXT_GUEST,
-	} state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+	atomic_t state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+	long dynticks_nesting;		/* Track process nesting level. */
+	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
+#endif
 };
 
 #ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_key;
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
+static __always_inline int __ct_state(void)
+{
+	return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+}
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_dynticks(void)
+{
+	return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu(int cpu)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+	return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+	return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+}
+
+static __always_inline long ct_dynticks_nesting(void)
+{
+	return __this_cpu_read(context_tracking.dynticks_nesting);
+}
+
+static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+	return ct->dynticks_nesting;
+}
+
+static __always_inline long ct_dynticks_nmi_nesting(void)
+{
+	return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+}
+
+static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+	return ct->dynticks_nmi_nesting;
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern struct static_key_false context_tracking_key;
+
 static __always_inline bool context_tracking_enabled(void)
 {
 	return static_branch_unlikely(&context_tracking_key);
@@ -41,15 +116,31 @@ static inline bool context_tracking_enabled_this_cpu(void)
 	return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
 }
 
-static __always_inline bool context_tracking_in_user(void)
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CONTEXT_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
 {
-	return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+	int ret;
+
+	if (!context_tracking_enabled())
+		return CONTEXT_DISABLED;
+
+	preempt_disable();
+	ret = __ct_state();
+	preempt_enable();
+
+	return ret;
 }
+
 #else
-static __always_inline bool context_tracking_in_user(void) { return false; }
 static __always_inline bool context_tracking_enabled(void) { return false; }
 static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
 static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+#endif /* CONFIG_CONTEXT_TRACKING_USER */
 
 #endif
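The packed ->state word introduced above keeps the context-tracking state in the low bits (CT_STATE_MASK) and an RCU dynticks counter in the remaining bits, stepped in units of RCU_DYNTICKS_IDX by ct_state_inc(). A worked example under those definitions (CONTEXT_MAX = 4, so CT_STATE_MASK = 0x3 and RCU_DYNTICKS_IDX = 0x4); the function name is hypothetical:

/* Illustrative only: values follow from the definitions in the hunk above. */
static void my_state_word_example(void)
{
	unsigned long s = 2 * RCU_DYNTICKS_IDX + CONTEXT_USER;	/* 0b1010 */
	int ctx = s & CT_STATE_MASK;		/* == CONTEXT_USER (2) */
	long dynticks = s & CT_DYNTICKS_MASK;	/* == 8: two increments of 4 */

	/*
	 * The RCU_DYNTICKS_IDX bit is clear in s, so
	 * rcu_dynticks_curr_cpu_in_eqs() would report an extended
	 * quiescent state, consistent with CONTEXT_USER.
	 */
}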
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index c92ac75d6556..84a466b176cf 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -357,7 +357,7 @@ void irqentry_exit_to_user_mode(struct pt_regs *regs);
 /**
  * struct irqentry_state - Opaque object for exception state storage
  * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
- *            exit path has to invoke rcu_irq_exit().
+ *            exit path has to invoke ct_irq_exit().
  * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
  *           lockdep state is restored correctly on exit from nmi.
  *
@@ -395,12 +395,12 @@ typedef struct irqentry_state {
  *
  * For kernel mode entries RCU handling is done conditional. If RCU is
  * watching then the only RCU requirement is to check whether the tick has
- * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
- * invoked on entry and rcu_irq_exit() on exit.
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
  *
- * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
  * solves the problem of kernel mode pagefaults which can schedule, which
- * is not possible after invoking rcu_irq_enter() without undoing it.
+ * is not possible after invoking ct_irq_enter() without undoing it.
  *
  * For user mode entries irqentry_enter_from_user_mode() is invoked to
  * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
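A simplified, hypothetical sketch of the entry pattern the comment above describes: ct_irq_enter() is invoked only when RCU is not already watching, and the ->exit_rcu flag tells the exit path to undo it. The real logic lives in kernel/entry/common.c and handles more cases.

static irqentry_state_t my_irqentry_enter(struct pt_regs *regs)	/* hypothetical */
{
	irqentry_state_t ret = { .exit_rcu = false };

	if (!rcu_is_watching()) {	/* e.g. interrupt taken from deep idle */
		ct_irq_enter();		/* make RCU watch this CPU */
		ret.exit_rcu = true;	/* exit path must call ct_irq_exit() */
	}
	return ret;
}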
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 76878b357ffa..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -92,14 +92,6 @@ void irq_exit_rcu(void);
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
-#ifdef CONFIG_TINY_RCU
-static inline void rcu_nmi_enter(void) { }
-static inline void rcu_nmi_exit(void) { }
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
 /*
  * NMI vs Tracing
  * --------------
@@ -124,7 +116,7 @@ extern void rcu_nmi_exit(void);
 	do {						\
 		__nmi_enter();				\
 		lockdep_hardirq_enter();		\
-		rcu_nmi_enter();			\
+		ct_nmi_enter();				\
 		instrumentation_begin();		\
 		ftrace_nmi_enter();			\
 		instrumentation_end();			\
@@ -143,7 +135,7 @@ extern void rcu_nmi_exit(void);
 		instrumentation_begin();	\
 		ftrace_nmi_exit();		\
 		instrumentation_end();		\
-		rcu_nmi_exit();			\
+		ct_nmi_exit();			\
 		lockdep_hardirq_exit();		\
 		__nmi_exit();			\
 	} while (0)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1a32036c918c..f527f27e6438 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -29,6 +29,7 @@
 #include <linux/lockdep.h>
 #include <asm/processor.h>
 #include <linux/cpumask.h>
+#include <linux/context_tracking_irq.h>
 
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
@@ -41,6 +42,7 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
 void rcu_barrier_tasks(void);
 void rcu_barrier_tasks_rude(void);
 void synchronize_rcu(void);
+unsigned long get_completed_synchronize_rcu(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
@@ -103,13 +105,11 @@ static inline void rcu_sysrq_start(void) { }
 static inline void rcu_sysrq_end(void) { }
 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
-#ifdef CONFIG_NO_HZ_FULL
-void rcu_user_enter(void);
-void rcu_user_exit(void);
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
 #else
-static inline void rcu_user_enter(void) { }
-static inline void rcu_user_exit(void) { }
-#endif /* CONFIG_NO_HZ_FULL */
+static inline void rcu_irq_work_resched(void) { }
+#endif
 
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
@@ -128,7 +128,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
  * @a: Code that RCU needs to pay attention to.
  *
  * RCU read-side critical sections are forbidden in the inner idle loop,
- * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * that is, between the ct_idle_enter() and the ct_idle_exit() -- RCU
 * will happily ignore any such read-side critical sections. However,
 * things like powertop need tracepoints in the inner idle loop.
 *
@@ -143,9 +143,9 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
  */
 #define RCU_NONIDLE(a) \
 	do { \
-		rcu_irq_enter_irqson(); \
+		ct_irq_enter_irqson(); \
 		do { a; } while (0); \
-		rcu_irq_exit_irqson(); \
+		ct_irq_exit_irqson(); \
 	} while (0)
 
 /*
@@ -169,13 +169,24 @@ void synchronize_rcu_tasks(void);
 # endif
 
 # ifdef CONFIG_TASKS_TRACE_RCU
-# define rcu_tasks_trace_qs(t)						\
-	do {								\
-		if (!likely(READ_ONCE((t)->trc_reader_checked)) &&	\
-		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) {	\
-			smp_store_release(&(t)->trc_reader_checked, true); \
-			smp_mb(); /* Readers partitioned by store. */	\
-		}							\
+// Bits for ->trc_reader_special.b.need_qs field.
+#define TRC_NEED_QS		0x1  // Task needs a quiescent state.
+#define TRC_NEED_QS_CHECKED	0x2  // Task has been checked for needing quiescent state.
+
+u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
+void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+
+# define rcu_tasks_trace_qs(t)						\
+	do {								\
+		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
+									\
+		if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
+		    likely(!___rttq_nesting)) {				\
+			rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
+		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
+			   !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
+			rcu_tasks_trace_qs_blkd(t);			\
+		}							\
 	} while (0)
 # else
 # define rcu_tasks_trace_qs(t) do { } while (0)
@@ -184,7 +195,7 @@ void synchronize_rcu_tasks(void);
 #define rcu_tasks_qs(t, preempt)					\
 do {									\
 	rcu_tasks_classic_qs((t), (preempt));				\
-	rcu_tasks_trace_qs((t));					\
+	rcu_tasks_trace_qs(t);						\
 } while (0)
 
 # ifdef CONFIG_TASKS_RUDE_RCU
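RCU_NONIDLE(), whose documentation is updated above, wraps a statement in ct_irq_enter_irqson()/ct_irq_exit_irqson() so that RCU watches for its duration. A usage sketch from an idle-loop context; trace_my_idle_event() and the wrapper function are hypothetical.

static void my_idle_report(int cpu, u64 residency_ns)	/* hypothetical */
{
	/* Plain RCU readers are forbidden in the inner idle loop, so
	 * make RCU watch just for the duration of the tracepoint: */
	RCU_NONIDLE(trace_my_idle_event(cpu, residency_ns));
}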
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 6f9c35817398..9bc8cbb33340 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -75,7 +75,7 @@ static inline void rcu_read_unlock_trace(void)
 	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
 	barrier(); // Critical section before disabling.
 	// Disable IPI-based setting of .need_qs.
-	WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
+	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
 	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
 		WRITE_ONCE(t->trc_reader_nesting, nesting);
 		return;  // We assume shallow reader nesting.
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5fed476f977f..62815c0a2dce 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -23,6 +23,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
 	might_sleep();
 }
 
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
+{
+	return start_poll_synchronize_rcu();
+}
+
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
+{
+	cond_synchronize_rcu(oldstate);
+}
+
 extern void rcu_barrier(void);
 
 static inline void synchronize_rcu_expedited(void)
@@ -38,7 +48,7 @@ static inline void synchronize_rcu_expedited(void)
  */
 extern void kvfree(const void *addr);
 
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
 	if (head) {
 		call_rcu(head, func);
@@ -51,6 +61,15 @@ static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	kvfree((void *) func);
 }
 
+#ifdef CONFIG_KASAN_GENERIC
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+#else
+static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+{
+	__kvfree_call_rcu(head, func);
+}
+#endif
+
 void rcu_qs(void);
 
 static inline void rcu_softirq_qs(void)
@@ -76,12 +95,6 @@ static inline int rcu_needs_cpu(void)
 static inline void rcu_virt_note_context_switch(int cpu) { }
 static inline void rcu_cpu_stall_reset(void) { }
 static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_hardirq() && !in_serving_softirq())
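kvfree_call_rcu(), split above into an out-of-line wrapper under CONFIG_KASAN_GENERIC, is normally reached through the kfree_rcu() macro rather than called directly. A minimal caller sketch; struct my_node and my_node_free() are hypothetical.

struct my_node {
	int value;
	struct rcu_head rh;	/* reused to queue the grace-period callback */
};

static void my_node_free(struct my_node *p)
{
	kfree_rcu(p, rh);	/* frees p only after a full grace period */
}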
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 9c6cfb742504..47eaa4cb0df7 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -40,17 +40,13 @@ bool rcu_eqs_special_set(int cpu);
 void rcu_momentary_dyntick_idle(void);
 void kfree_rcu_scheduler_running(void);
 bool rcu_gp_might_be_stalled(void);
+unsigned long start_poll_synchronize_rcu_expedited(void);
+void cond_synchronize_rcu_expedited(unsigned long oldstate);
 unsigned long get_state_synchronize_rcu(void);
 unsigned long start_poll_synchronize_rcu(void);
 bool poll_state_synchronize_rcu(unsigned long oldstate);
 void cond_synchronize_rcu(unsigned long oldstate);
 
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
 bool rcu_is_idle_cpu(int cpu);
 
 #ifdef CONFIG_PROVE_RCU
@@ -59,6 +55,9 @@ void rcu_irq_exit_check_preempt(void);
 static inline void rcu_irq_exit_check_preempt(void) { }
 #endif
 
+struct task_struct;
+void rcu_preempt_deferred_qs(struct task_struct *t);
+
 void exit_rcu(void);
 void rcu_scheduler_starting(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 88b8817b827d..d6b0866c71ed 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -843,8 +843,9 @@ struct task_struct {
 	int				trc_reader_nesting;
 	int				trc_ipi_to_cpu;
 	union rcu_special		trc_reader_special;
-	bool				trc_reader_checked;
 	struct list_head		trc_holdout_list;
+	struct list_head		trc_blkd_node;
+	int				trc_blkd_cpu;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 
 	struct sched_info		sched_info;
@@ -2223,6 +2224,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 extern bool sched_task_on_rq(struct task_struct *p);
 extern unsigned long get_wchan(struct task_struct *p);
+extern struct task_struct *cpu_curr_snapshot(int cpu);
 
 /*
  * In order to reduce various lock holder preemption latencies provide an
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 28031b15f878..55717a2eda08 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -200,13 +200,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 		 */							\
 		if (rcuidle) {						\
 			__idx = srcu_read_lock_notrace(&tracepoint_srcu);\
-			rcu_irq_enter_irqson();				\
+			ct_irq_enter_irqson();				\
 		}							\
 									\
 		__DO_TRACE_CALL(name, TP_ARGS(args));			\
 									\
 		if (rcuidle) {						\
-			rcu_irq_exit_irqson();				\
+			ct_irq_exit_irqson();				\
 			srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
 		}							\
 									\
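Taken together, the rcutiny.h and rcutree.h hunks give both RCU flavors the new expedited polled primitives, with Tiny RCU mapping them onto the normal-grace-period versions. A hedged usage sketch; my_update_and_wait() and my_reinit() are hypothetical, the two *_expedited() calls are the API added by this merge.

static void my_update_and_wait(void)
{
	unsigned long cookie;

	cookie = start_poll_synchronize_rcu_expedited();	/* start an expedited GP */
	my_reinit();		/* hypothetical work overlapping the grace period */
	cond_synchronize_rcu_expedited(cookie);	/* sleeps only if the GP has not finished */
}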