author     Paul E. McKenney <paulmck@kernel.org>    2021-11-12 07:33:40 -0800
committer  Paul E. McKenney <paulmck@kernel.org>    2021-12-09 10:52:11 -0800
commit     8610b65680390a103b58f46282a1b05f7eebbba4
tree       ca427b83e50b363a8b40471c17c1a408147c46cf /kernel/rcu
parent     ce9b1c667f03e0aa30d3eb69d0932e010d131c49
rcu-tasks: Add rcupdate.rcu_task_enqueue_lim to set initial queueing
This commit adds a rcupdate.rcu_task_enqueue_lim module parameter that
sets the initial number of callback queues to use for the RCU Tasks
family of RCU implementations. This parameter allows testing of various
fanout values.
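
For illustration (a usage sketch, not part of this patch): the parameter is
declared with 0444 permissions, so it is read-only at runtime and would
normally be set on the kernel boot command line, for example:

    rcupdate.rcu_task_enqueue_lim=4

The value actually chosen at boot should then be readable through the usual
module-parameter sysfs path (assuming the standard "rcupdate." parameter
prefix used by kernel/rcu/update.c, which includes tasks.h):

    cat /sys/module/rcupdate/parameters/rcu_task_enqueue_lim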
Reported-by: Martin Lau <kafai@fb.com>
Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tasks.h | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 8e6601c5fd2e..9d3eddaecfde 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -130,6 +130,9 @@ module_param(rcu_task_ipi_delay, int, 0644);
 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 module_param(rcu_task_stall_timeout, int, 0644);
 
+static int rcu_task_enqueue_lim __read_mostly = -1;
+module_param(rcu_task_enqueue_lim, int, 0444);
+
 /* RCU tasks grace-period state for debugging. */
 #define RTGS_INIT		 0
 #define RTGS_WAIT_WAIT_CBS	 1
@@ -192,10 +195,19 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 {
 	int cpu;
 	unsigned long flags;
+	int lim;
 
 	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
-	rtp->percpu_enqueue_shift = ilog2(nr_cpu_ids);
-	rtp->percpu_enqueue_lim = 1;
+	if (rcu_task_enqueue_lim < 0)
+		rcu_task_enqueue_lim = nr_cpu_ids;
+	else if (rcu_task_enqueue_lim == 0)
+		rcu_task_enqueue_lim = 1;
+	lim = rcu_task_enqueue_lim;
+
+	if (lim > nr_cpu_ids)
+		lim = nr_cpu_ids;
+	WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
+	smp_store_release(&rtp->percpu_enqueue_lim, lim);
 	for_each_possible_cpu(cpu) {
 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 
@@ -211,7 +223,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
 	}
 	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
-
+	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
 }
 
 // Enqueue a callback for the specified flavor of Tasks RCU.
@@ -307,7 +319,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 	unsigned long flags;
 	int needgpcb = 0;
 
-	for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) {
+	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_enqueue_lim); cpu++) {
 		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 
 		/* Advance and accelerate any new callbacks. */
@@ -338,11 +350,11 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
 	cpu = rtpcp->cpu;
 	cpunext = cpu * 2 + 1;
-	if (cpunext < rtp->percpu_enqueue_lim) {
+	if (cpunext < smp_load_acquire(&rtp->percpu_enqueue_lim)) {
 		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
 		cpunext++;
-		if (cpunext < rtp->percpu_enqueue_lim) {
+		if (cpunext < smp_load_acquire(&rtp->percpu_enqueue_lim)) {
			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
 			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
 		}
 	}
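
To make the queueing arithmetic above concrete, here is a minimal standalone
userspace sketch (not kernel code) of how cblist_init_generic() derives
percpu_enqueue_shift, assuming the enqueue path selects a queue via
smp_processor_id() >> percpu_enqueue_shift as elsewhere in tasks.h. With
nr_cpu_ids = 8 and rcu_task_enqueue_lim = 2, the shift becomes
ilog2(8 / 2) = 2, so CPUs 0-3 share queue 0 and CPUs 4-7 share queue 1.

/*
 * Userspace sketch of the Tasks-RCU queue-selection arithmetic.
 * Not kernel code: ilog2_sketch() stands in for the kernel's ilog2().
 */
#include <stdio.h>

static int ilog2_sketch(unsigned int v)		/* floor(log2(v)), v > 0 */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	int nr_cpu_ids = 8;		/* example CPU count */
	int lim = 2;			/* example rcu_task_enqueue_lim */
	int shift;
	int cpu;

	/* Clamping mirrors cblist_init_generic() above. */
	if (lim < 0)
		lim = nr_cpu_ids;	/* -1: one queue per CPU */
	else if (lim == 0)
		lim = 1;		/* 0: a single queue */
	if (lim > nr_cpu_ids)
		lim = nr_cpu_ids;

	shift = ilog2_sketch(nr_cpu_ids / lim);
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		printf("CPU %d -> callback queue %d\n", cpu, cpu >> shift);
	return 0;
}

Note also the release/acquire pairing visible in the diff:
cblist_init_generic() publishes the limit with smp_store_release(), and the
readers in rcu_tasks_need_gpcb() and rcu_tasks_invoke_cbs() use
smp_load_acquire(), so a reader that observes the new limit is also
guaranteed to observe the per-CPU initialization that preceded the store.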