author		Paul E. McKenney <paulmck@linux.ibm.com>	2019-04-12 15:58:34 -0700
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2019-08-13 14:35:49 -0700
commit		ce5215c1342c6c89b3c3c45fea82cddf0b013787
tree		06943157837341c47eadb469cd225f5966f4c9ed /kernel/rcu
parent		1bb5f9b95afe5d9d6b586389ce5e8f461a5b671c
rcu/nocb: Use separate flag to indicate offloaded ->cblist
RCU callback processing currently uses rcu_is_nocb_cpu() to determine
whether or not the current CPU's callbacks are to be offloaded.
This works, but it is not so good for cache locality. Plus use of
->cblist for offloaded callbacks will greatly increase the frequency
of these checks. This commit therefore adds a ->offloaded flag to the
rcu_segcblist structure to provide a more flexible and cache-friendly
means of checking for callback offloading.
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
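
For readers skimming the diff below, the pattern being introduced can be reduced to a self-contained sketch: the offload state becomes a flag stored in the callback-list structure itself, set once while the list is empty and read through a trivial accessor, so hot-path code consults memory it is already touching rather than a separate per-CPU lookup. The sketch is plain userspace C, not kernel code; the struct and assert() are simplified stand-ins for rcu_segcblist and WARN_ON_ONCE(), and any name not appearing in the patch is hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for rcu_segcblist: only the fields this commit touches. */
struct segcblist {
	long len;		/* number of queued callbacks */
	bool enabled;
	bool offloaded;		/* the flag this commit adds */
};

/* Analogue of rcu_segcblist_offload(): flag an *empty* list as offloaded. */
static void segcblist_offload(struct segcblist *rsclp)
{
	assert(rsclp->len == 0);	/* mirrors the WARN_ON_ONCE() checks */
	rsclp->offloaded = true;
}

/* Analogue of rcu_segcblist_is_offloaded(): the new cache-friendly check. */
static bool segcblist_is_offloaded(struct segcblist *rsclp)
{
	return rsclp->offloaded;
}

int main(void)
{
	struct segcblist cbl = { .len = 0, .enabled = false, .offloaded = false };

	segcblist_offload(&cbl);
	printf("offloaded: %d\n", segcblist_is_offloaded(&cbl));	/* prints 1 */
	return 0;
}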
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/rcu_segcblist.c	12
-rw-r--r--	kernel/rcu/rcu_segcblist.h	7
-rw-r--r--	kernel/rcu/tree.c	10
-rw-r--r--	kernel/rcu/tree_plugin.h	11

4 files changed, 32 insertions, 8 deletions
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index b305dcac34c9..700779f4c0cb 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -74,6 +74,18 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
 }
 
 /*
+ * Mark the specified rcu_segcblist structure as offloaded.  This
+ * structure must be empty.
+ */
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
+{
+	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
+	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
+	WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
+	rsclp->offloaded = 1;
+}
+
+/*
  * Does the specified rcu_segcblist structure contain callbacks that
  * are ready to be invoked?
  */
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index b2de7b32da29..8f3783391075 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -66,6 +66,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
 	return rsclp->enabled;
 }
 
+/* Is the specified rcu_segcblist offloaded? */
+static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
+{
+	return rsclp->offloaded;
+}
+
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
@@ -78,6 +84,7 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
 
 void rcu_segcblist_init(struct rcu_segcblist *rsclp);
 void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
 struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a14e5fbbea46..6f5c96c4f9a3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2858,10 +2858,11 @@ void rcu_barrier(void)
 	 * corresponding CPU's preceding callbacks have been invoked.
 	 */
 	for_each_possible_cpu(cpu) {
-		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
-			continue;
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		if (rcu_is_nocb_cpu(cpu)) {
+		if (!cpu_online(cpu) &&
+		    !rcu_segcblist_is_offloaded(&rdp->cblist))
+			continue;
+		if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
 			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
 				rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
 						  rcu_state.barrier_sequence);
@@ -3155,7 +3156,8 @@ void rcutree_migrate_callbacks(int cpu)
 	struct rcu_node *rnp_root = rcu_get_root();
 	bool needwake;
 
-	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
+	    rcu_segcblist_empty(&rdp->cblist))
 		return; /* No callbacks to migrate. */
 
 	local_irq_save(flags);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b8a43cf9bb4e..fc6133eed50a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1382,7 +1382,7 @@ static void rcu_prepare_for_idle(void)
 	int tne;
 
 	lockdep_assert_irqs_disabled();
-	if (rcu_is_nocb_cpu(smp_processor_id()))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist))
 		return;
 
 	/* Handle nohz enablement switches conservatively. */
@@ -1431,8 +1431,10 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
 	lockdep_assert_irqs_disabled();
-	if (rcu_is_nocb_cpu(smp_processor_id()))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -1694,7 +1696,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy, unsigned long flags)
 {
 
-	if (!rcu_is_nocb_cpu(rdp->cpu))
+	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
 		return false;
 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
@@ -1729,7 +1731,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
 						     unsigned long flags)
 {
 	lockdep_assert_irqs_disabled();
-	if (!rcu_is_nocb_cpu(smp_processor_id()))
+	if (!rcu_segcblist_is_offloaded(&my_rdp->cblist))
 		return false; /* Not NOCBs CPU, caller must migrate CBs. */
 	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
 				rcu_segcblist_tail(&rdp->cblist),
@@ -2192,6 +2194,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 	}
 	rcu_segcblist_init(&rdp->cblist);
 	rcu_segcblist_disable(&rdp->cblist);
+	rcu_segcblist_offload(&rdp->cblist);
 	return true;
 }
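
The call-site conversions above are mechanical: each one trades a lookup keyed by CPU number (the old rcu_is_nocb_cpu() test) for a read of the flag embedded in the ->cblist the caller already manipulates. A compilable userspace toy of the before/after shape, with a hypothetical array standing in for the nocb CPU mask:

#include <stdbool.h>
#include <stdio.h>

struct segcblist { bool offloaded; };
struct rcu_data { int cpu; struct segcblist cblist; };

/* Old style: global mask indexed by CPU number (extra cache traffic). */
static bool fake_nocb_mask[4] = { false, true, false, false };
static bool is_nocb_cpu(int cpu)
{
	return fake_nocb_mask[cpu];
}

int main(void)
{
	struct rcu_data rdp = { .cpu = 1, .cblist = { .offloaded = true } };

	/* Before this commit: if (rcu_is_nocb_cpu(rdp.cpu)) ... */
	bool old_check = is_nocb_cpu(rdp.cpu);
	/* After this commit: if (rcu_segcblist_is_offloaded(&rdp.cblist)) ... */
	bool new_check = rdp.cblist.offloaded;

	printf("old=%d new=%d\n", old_check, new_check);
	return 0;
}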