Diffstat (limited to 'include')
-rw-r--r--  include/linux/rcupdate.h    | 84
-rw-r--r--  include/linux/rcutiny.h     |  1
-rw-r--r--  include/linux/rcutree.h     |  1
-rw-r--r--  include/linux/srcu.h        |  6
-rw-r--r--  include/linux/srcutree.h    |  2
-rw-r--r--  include/linux/timer.h       |  2
-rw-r--r--  include/trace/events/rcu.h  |  1
7 files changed, 56 insertions, 41 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9455476c5ba2..d9680b798b21 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -315,7 +315,7 @@ static inline int rcu_read_lock_any_held(void)
 #define RCU_LOCKDEP_WARN(c, s)						\
 	do {								\
 		static bool __section(".data.unlikely") __warned;	\
-		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
+		if ((c) && debug_lockdep_rcu_enabled() && !__warned) {	\
 			__warned = true;				\
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
 		}							\
@@ -363,6 +363,20 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_check_sparse(p, space)
 #endif /* #else #ifdef __CHECKER__ */
 
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p)						\
+({									\
+	typeof(*p) *_________p1 = (typeof(*p) *__force)(p); \
+	rcu_check_sparse(p, __rcu); \
+	((typeof(*p) __force __kernel *)(_________p1)); \
+})
+
 #define __rcu_access_pointer(p, space) \
 ({ \
 	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
@@ -518,7 +532,12 @@ do { \
  * @p: The pointer to read, prior to dereferencing
  * @c: The conditions under which the dereference will take place
  *
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu, and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
  */
 #define rcu_dereference_bh_check(p, c) \
 	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
@@ -529,6 +548,11 @@ do { \
  * @c: The conditions under which the dereference will take place
  *
  * This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu, and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
  */
 #define rcu_dereference_sched_check(p, c) \
 	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
@@ -620,6 +644,12 @@ do { \
  * sections, invocation of the corresponding RCU callback is deferred
  * until after the all the other CPUs exit their critical sections.
  *
+ * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
+ * wait for regions of code with preemption disabled, including regions of
+ * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
+ * define synchronize_sched(), only code enclosed within rcu_read_lock()
+ * and rcu_read_unlock() are guaranteed to be waited for.
+ *
  * Note, however, that RCU callbacks are permitted to run concurrently
  * with new RCU read-side critical sections. One way that this can happen
  * is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -672,33 +702,12 @@ static __always_inline void rcu_read_lock(void)
 /**
  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * In recent kernels that have consolidated synchronize_sched() and
+ * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
+ * also extends to the scheduler's runqueue and priority-inheritance
+ * spinlocks, courtesy of the quiescent-state deferral that is carried
+ * out when rcu_read_unlock() is invoked with interrupts disabled.
  *
  * See rcu_read_lock() for more information.
  */
@@ -714,9 +723,11 @@ static inline void rcu_read_unlock(void)
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
  *
  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
  * must occur in the same context, for example, it is illegal to invoke
@@ -749,9 +760,12 @@ static inline void rcu_read_unlock_bh(void)
 /**
  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
  *
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
  *
  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
  * must occur in the same context, for example, it is illegal to invoke
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 35e0be326ffc..953e70fafe38 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -86,7 +86,6 @@ static inline void rcu_irq_enter(void) { }
 static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
 static inline void rcu_irq_exit_check_preempt(void) { }
 #define rcu_is_idle_cpu(cpu) \
 	(is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index b89b54130f49..53209d669400 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -49,7 +49,6 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
 bool rcu_is_idle_cpu(int cpu);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a0895bbf71ce..e6011a9975af 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -64,6 +64,12 @@ unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
 
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 /**
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..cb1f4351e8ba 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -82,9 +82,7 @@ struct srcu_struct {
 						/* callback for the barrier */
 						/* operation. */
 	struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 /* Values for state variable (bottom bits of ->srcu_gp_seq). */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 4118a97e62fb..fda13c9d1256 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -192,8 +192,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
 
 #define del_singleshot_timer_sync(t) del_timer_sync(t)
 
-extern bool timer_curr_running(struct timer_list *timer);
-
 extern void init_timers(void);
 struct hrtimer;
 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 6768b64bc738..670e41783edd 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -278,6 +278,7 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
  *	"WakeNot": Don't wake rcuo kthread.
  *	"WakeNotPoll": Don't wake rcuo kthread because it is polling.
  *	"WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ *	"WakeBypassIsDeferred": Wake rcuo kthread later, bypass list is contended.
  *	"WokeEmpty": rcuo CB kthread woke to find empty list.
  */
 TRACE_EVENT_RCU(rcu_nocb_wake,
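
As a rough usage sketch (not part of the patch), the unrcu_pointer() macro added to rcupdate.h above lets an __rcu-annotated pointer be handed to xchg() and similar atomics without sparse complaining about the address-space mismatch. The names struct foo, gp, and publish_foo() below are made up for illustration:

#include <linux/atomic.h>
#include <linux/rcupdate.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;

/* Atomically publish newp and return the previous pointer stripped of its
 * __rcu annotation; the caller may free the old object after a grace
 * period, for example via kfree_rcu() or after synchronize_rcu(). */
static struct foo *publish_foo(struct foo *newp)
{
	return unrcu_pointer(xchg(&gp, RCU_INITIALIZER(newp)));
}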
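
Similarly, a minimal reader sketch for the updated rcu_read_lock_bh() and rcu_dereference_bh_check() comments (again not from the patch; struct bar and gd are assumed names). Per the comment changes above, in v5.0 and later kernels the matching updater can simply use synchronize_rcu() or call_rcu(), because those grace periods now also wait for softirq-disabled regions:

#include <linux/printk.h>
#include <linux/rcupdate.h>

struct bar {
	int val;
};

static struct bar __rcu *gd;

static void bar_reader(void)
{
	struct bar *p;

	rcu_read_lock_bh();		/* Begin a bh-flavored read-side critical section. */
	p = rcu_dereference_bh(gd);	/* Lockdep-checked via rcu_read_lock_bh_held(). */
	if (p)
		pr_debug("bar: %d\n", p->val);
	rcu_read_unlock_bh();
}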