author    | Linus Torvalds <torvalds@linux-foundation.org> | 2022-01-11 09:29:44 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-01-11 09:29:44 -0800
commit    | e7d38f16c20bf2a9b2502bb1d7407360d09a836a (patch)
tree      | 762a8aa45ea77a9045acb5e8e67647806c61b113 /kernel/rcu/tree_stall.h
parent    | a229327733b86aa585effdb0d27a87b12aa51597 (diff)
parent    | f80fe66c38d561a006fb4f514b0ee5d11cbe2673 (diff)
Merge tag 'rcu.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu
Pull RCU updates from Paul McKenney:
- Documentation updates, perhaps most notably Neil Brown's writeup of
the reference-counting analogy to RCU.
- Expedited grace-period cleanups.
- Remove CONFIG_RCU_FAST_NO_HZ due to lack of valid users. I have asked
around, posted a blog entry, and sent this series to LKML without
result.
- Miscellaneous fixes.
- RCU callback offloading updates, perhaps most notably Frederic
Weisbecker's work allowing CPUs booted in the de-offloaded state to be
offloaded at runtime (a brief sketch of the runtime-offload interface
follows this list).
- nolibc fixes from Willy Tarreau and Anmar Faizi, but also including
Mark Brown's addition of gettid().
- RCU Tasks Trace fixes, including changes that increase the
scalability of call_rcu_tasks_trace() for the BPF folks (Martin Lau
and KP Singh); a hypothetical call_rcu_tasks_trace() caller is
sketched after this list.
- Various fixes including those from Wander Lairson Costa and Li
Zhijian.
- Fixes plus addition of tests for the increased call_rcu_tasks_trace()
scalability.
* tag 'rcu.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (87 commits)
rcu/nocb: Merge rcu_spawn_cpu_nocb_kthread() and rcu_spawn_one_nocb_kthread()
rcu/nocb: Allow empty "rcu_nocbs" kernel parameter
rcu/nocb: Create kthreads on all CPUs if "rcu_nocbs=" or "nohz_full=" are passed
rcu/nocb: Optimize kthreads and rdp initialization
rcu/nocb: Prepare nocb_cb_wait() to start with a non-offloaded rdp
rcu/nocb: Remove rcu_node structure from nocb list when de-offloaded
rcu-tasks: Use fewer callbacks queues if callback flood ends
rcu-tasks: Use separate ->percpu_dequeue_lim for callback dequeueing
rcu-tasks: Use more callback queues if contention encountered
rcu-tasks: Avoid raw-spinlocked wakeups from call_rcu_tasks_generic()
rcu-tasks: Count trylocks to estimate call_rcu_tasks() contention
rcu-tasks: Add rcupdate.rcu_task_enqueue_lim to set initial queueing
rcu-tasks: Make rcu_barrier_tasks*() handle multiple callback queues
rcu-tasks: Use workqueues for multiple rcu_tasks_invoke_cbs() invocations
rcu-tasks: Abstract invocations of callbacks
rcu-tasks: Abstract checking of callback lists
rcu-tasks: Add a ->percpu_enqueue_lim to the rcu_tasks structure
rcu-tasks: Inspect stalled task's trc state in locked state
rcu-tasks: Use spin_lock_rcu_node() and friends
rcutorture: Combine n_max_cbs from all kthreads in a callback flood
...
Diffstat (limited to 'kernel/rcu/tree_stall.h')
-rw-r--r-- | kernel/rcu/tree_stall.h | 27
1 file changed, 2 insertions, 25 deletions
```diff
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 5e2fa6fd97f1..21bebf7c9030 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -347,26 +347,6 @@ static void rcu_dump_cpu_stacks(void)
 	}
 }
 
-#ifdef CONFIG_RCU_FAST_NO_HZ
-
-static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
-{
-	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
-
-	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
-		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
-		!!rdp->tick_nohz_enabled_snap);
-}
-
-#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-
-static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
-{
-	*cp = '\0';
-}
-
-#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
-
 static const char * const gp_state_names[] = {
 	[RCU_GP_IDLE] = "RCU_GP_IDLE",
 	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
@@ -408,13 +388,12 @@ static bool rcu_is_gp_kthread_starving(unsigned long *jp)
  * of RCU grace periods that this CPU is ignorant of, for example, "1"
  * if the CPU was aware of the previous grace period.
  *
- * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
+ * Also print out idle info.
  */
 static void print_cpu_stall_info(int cpu)
 {
 	unsigned long delta;
 	bool falsepositive;
-	char fast_no_hz[72];
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	char *ticks_title;
 	unsigned long ticks_value;
@@ -432,11 +411,10 @@ static void print_cpu_stall_info(int cpu)
 		ticks_title = "ticks this GP";
 		ticks_value = rdp->ticks_this_gp;
 	}
-	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
 	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
 	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
 			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
-	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
+	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
 	       cpu,
 	       "O."[!!cpu_online(cpu)],
 	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -449,7 +427,6 @@ static void print_cpu_stall_info(int cpu)
 	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
 	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
-	       fast_no_hz,
 	       falsepositive ? " (false positive?)" : "");
 }
```