author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-05-17 13:32:51 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-07-12 15:39:20 -0700
commit     b06ae25a1e2b54b2b5bc589a4a118b7bb39159fe (patch)
tree       cfdac7dc54f1027835fe982422afe8edc3e927e0 /kernel/rcu
parent     51fbb910f52c8559a78665d203e55ab2b95e7126 (diff)
rcu: Use RCU CPU stall timeout for rcu_check_gp_start_stall()
Currently, rcu_check_gp_start_stall() waits for one second after the first request before complaining that a grace period has not yet started. This was desirable while testing the conversion from ->future_gp_needed[] to ->gp_seq_needed, but it is a bit on the hair-trigger side for production use under heavy load. This commit therefore makes this wait time be exactly that of the RCU CPU stall warning, allowing easy adjustment of both timeouts to suit the distribution or installation at hand.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
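As a rough illustration of the timing pattern the patch below switches to, here is a minimal, self-contained userspace sketch (not kernel code): it compares "now" against "last activity plus a configurable delay" using a wrap-safe time_before()-style check, instead of hard-coding a one-second wait. All names and values in the sketch (stall_timeout_secs, stall_check_delay(), last_request, the HZ value) are illustrative assumptions, not taken from the kernel sources.

/*
 * Minimal userspace sketch (not kernel code) of the pattern applied by the
 * patch below: derive the complaint delay from a configurable stall timeout
 * instead of hard-coding one second.  All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define HZ 100                       /* pretend ticks per second */

static int stall_timeout_secs = 21;  /* stand-in for a tunable stall timeout */

/* Timeout expressed in ticks, recomputed from the tunable each time. */
static unsigned long stall_check_delay(void)
{
	return (unsigned long)stall_timeout_secs * HZ;
}

/* Kernel-style time_before(): true if a is before b, safe across wraparound. */
static bool time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

/* Complain only once the full (configurable) delay has elapsed. */
static void check_start_stall(unsigned long now, unsigned long last_request)
{
	const unsigned long delay = stall_check_delay();

	if (time_before(now, last_request + delay))
		return;  /* Not yet overdue; stay quiet. */
	fprintf(stderr, "request pending for %lu ticks without progress\n",
		now - last_request);
}

int main(void)
{
	check_start_stall(5 * HZ, 0);   /* quiet: only 5 seconds have passed */
	check_start_stall(30 * HZ, 0);  /* warns: past the 21-second timeout */
	return 0;
}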
Diffstat (limited to 'kernel/rcu')
 kernel/rcu/tree.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index cdc4fca0c4cb..7746fe1ee3fc 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2753,6 +2753,7 @@ static void
 rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 			 struct rcu_data *rdp)
 {
+	const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
 	unsigned long flags;
 	unsigned long j;
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
@@ -2762,8 +2763,8 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
 		return;
 	j = jiffies; /* Expensive access, and in common case don't get here. */
-	if (time_before(j, READ_ONCE(rsp->gp_req_activity) + HZ) ||
-	    time_before(j, READ_ONCE(rsp->gp_activity) + HZ) ||
+	if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+	    time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
 	    atomic_read(&warned))
 		return;
 
@@ -2771,8 +2772,8 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	j = jiffies;
 	if (rcu_gp_in_progress(rsp) ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, READ_ONCE(rsp->gp_req_activity) + HZ) ||
-	    time_before(j, READ_ONCE(rsp->gp_activity) + HZ) ||
+	    time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+	    time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
 	    atomic_read(&warned)) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
@@ -2784,18 +2785,18 @@ rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
 	j = jiffies;
 	if (rcu_gp_in_progress(rsp) ||
 	    ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
-	    time_before(j, rsp->gp_req_activity + HZ) ||
-	    time_before(j, rsp->gp_activity + HZ) ||
+	    time_before(j, rsp->gp_req_activity + gpssdelay) ||
+	    time_before(j, rsp->gp_activity + gpssdelay) ||
 	    atomic_xchg(&warned, 1)) {
 		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
-	pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x %s->state:%#lx\n",
+	pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
 		 __func__, (long)READ_ONCE(rsp->gp_seq),
 		 (long)READ_ONCE(rnp_root->gp_seq_needed),
 		 j - rsp->gp_req_activity, j - rsp->gp_activity,
-		 rsp->gp_flags, rsp->name,
+		 rsp->gp_flags, rsp->gp_state, rsp->name,
 		 rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
 	WARN_ON(1);
 	if (rnp_root != rnp)