Diffstat (limited to 'kernel/rcu/tree_plugin.h')
 kernel/rcu/tree_plugin.h | 111 +++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 59 insertions(+), 52 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c1d7f27bd38f..3ec85cb5d544 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -30,14 +30,24 @@
 #include <linux/smpboot.h>
 #include "../time/tick-internal.h"
 
-#define RCU_KTHREAD_PRIO 1
-
 #ifdef CONFIG_RCU_BOOST
+
 #include "../locking/rtmutex_common.h"
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else
-#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
-#endif
+
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
+/*
+ * Control variables for per-CPU and per-rcu_node kthreads.  These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
@@ -72,9 +82,6 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
 	pr_info("\tRCU torture testing starts during boot.\n");
 #endif
-#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
-	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
-#endif
 #if defined(CONFIG_RCU_CPU_STALL_INFO)
 	pr_info("\tAdditional per-CPU info printed with stalls.\n");
 #endif
@@ -85,9 +92,12 @@ static void __init rcu_bootup_announce_oddness(void)
 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+#ifdef CONFIG_RCU_BOOST
+	pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
+#endif
 }
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
@@ -156,7 +166,7 @@ static void rcu_preempt_qs(void)
  *
  * Caller must disable preemption.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 	struct task_struct *t = current;
 	unsigned long flags;
@@ -167,7 +177,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	    !t->rcu_read_unlock_special.b.blocked) {
 
 		/* Possibly blocking in an RCU read-side critical section. */
-		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
+		rdp = this_cpu_ptr(rcu_preempt_state.rda);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		smp_mb__after_unlock_lock();
@@ -415,8 +425,6 @@ void rcu_read_unlock_special(struct task_struct *t)
 	}
 }
 
-#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
-
 /*
  * Dump detailed information for all tasks blocking the current RCU
  * grace period on the specified rcu_node structure.
@@ -451,14 +459,6 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
-static void rcu_print_detail_task_stall(struct rcu_state *rsp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
-
 #ifdef CONFIG_RCU_CPU_STALL_INFO
 
 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
@@ -621,7 +621,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
  *
  * Caller must disable hard irqs.
  */
-static void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(void)
 {
 	struct task_struct *t = current;
 
@@ -630,8 +630,8 @@ static void rcu_preempt_check_callbacks(int cpu)
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
-	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
+	    __this_cpu_read(rcu_preempt_data.qs_pending) &&
+	    !__this_cpu_read(rcu_preempt_data.passed_quiesce))
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -919,7 +919,7 @@ void exit_rcu(void)
 	__rcu_read_unlock();
 }
 
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state_p = &rcu_sched_state;
 
@@ -945,7 +945,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
-static void rcu_preempt_note_context_switch(int cpu)
+static void rcu_preempt_note_context_switch(void)
 {
 }
 
@@ -1017,7 +1017,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
-static void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(void)
 {
 }
 
@@ -1070,7 +1070,7 @@ void exit_rcu(void)
 {
 }
 
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
 
@@ -1326,7 +1326,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	smp_mb__after_unlock_lock();
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	sp.sched_priority = RCU_BOOST_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 	return 0;
@@ -1343,7 +1343,7 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
 	struct sched_param sp;
 
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
@@ -1512,10 +1512,10 @@ static void rcu_prepare_kthreads(int cpu)
  * any flavor of RCU.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+int rcu_needs_cpu(unsigned long *delta_jiffies)
 {
 	*delta_jiffies = ULONG_MAX;
-	return rcu_cpu_has_callbacks(cpu, NULL);
+	return rcu_cpu_has_callbacks(NULL);
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
@@ -1523,7 +1523,7 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
-static void rcu_cleanup_after_idle(int cpu)
+static void rcu_cleanup_after_idle(void)
 {
 }
 
@@ -1531,7 +1531,7 @@ static void rcu_cleanup_after_idle(int cpu)
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
-static void rcu_prepare_for_idle(int cpu)
+static void rcu_prepare_for_idle(void)
 {
 }
 
@@ -1624,15 +1624,15 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
  * The caller must have disabled interrupts.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(int cpu, unsigned long *dj)
+int rcu_needs_cpu(unsigned long *dj)
 {
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* Snapshot to detect later posting of non-lazy callback. */
 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 
 	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
+	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
 		*dj = ULONG_MAX;
 		return 0;
 	}
@@ -1666,12 +1666,12 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
  *
  * The caller must have disabled interrupts.
  */
-static void rcu_prepare_for_idle(int cpu)
+static void rcu_prepare_for_idle(void)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
 	bool needwake;
 	struct rcu_data *rdp;
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 	int tne;
@@ -1679,7 +1679,7 @@ static void rcu_prepare_for_idle(int cpu)
 	/* Handle nohz enablement switches conservatively. */
 	tne = ACCESS_ONCE(tick_nohz_active);
 	if (tne != rdtp->tick_nohz_enabled_snap) {
-		if (rcu_cpu_has_callbacks(cpu, NULL))
+		if (rcu_cpu_has_callbacks(NULL))
 			invoke_rcu_core(); /* force nohz to see update. */
 		rdtp->tick_nohz_enabled_snap = tne;
 		return;
@@ -1688,7 +1688,7 @@
 		return;
 
 	/* If this is a no-CBs CPU, no callbacks, just return. */
-	if (rcu_is_nocb_cpu(cpu))
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 
 	/*
@@ -1712,7 +1712,7 @@
 		return;
 	rdtp->last_accelerate = jiffies;
 	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
+		rdp = this_cpu_ptr(rsp->rda);
 		if (!*rdp->nxttail[RCU_DONE_TAIL])
 			continue;
 		rnp = rdp->mynode;
@@ -1731,10 +1731,10 @@
  * any grace periods that elapsed while the CPU was idle, and if any
  * callbacks are now ready to invoke, initiate invocation.
  */
-static void rcu_cleanup_after_idle(int cpu)
+static void rcu_cleanup_after_idle(void)
 {
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-	if (rcu_is_nocb_cpu(cpu))
+	if (rcu_is_nocb_cpu(smp_processor_id()))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -2573,9 +2573,13 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
 			rdp->nocb_leader = rdp_spawn;
 			if (rdp_last && rdp != rdp_spawn)
 				rdp_last->nocb_next_follower = rdp;
-			rdp_last = rdp;
-			rdp = rdp->nocb_next_follower;
-			rdp_last->nocb_next_follower = NULL;
+			if (rdp == rdp_spawn) {
+				rdp = rdp->nocb_next_follower;
+			} else {
+				rdp_last = rdp;
+				rdp = rdp->nocb_next_follower;
+				rdp_last->nocb_next_follower = NULL;
+			}
 		} while (rdp);
 		rdp_spawn->nocb_next_follower = rdp_old_leader;
 	}
@@ -2761,9 +2765,10 @@ static int full_sysidle_state;		/* Current system-idle state. */
 * to detect full-system idle states, not RCU quiescent states and grace
 * periods.  The caller must have disabled interrupts.
 */
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_enter(int irq)
 {
 	unsigned long j;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
 	/* If there are no nohz_full= CPUs, no need to track this. */
 	if (!tick_nohz_full_enabled())
@@ -2832,8 +2837,10 @@ void rcu_sysidle_force_exit(void)
 * usermode execution does -not- count as idle here!  The caller must
 * have disabled interrupts.
 */
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_exit(int irq)
 {
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
 	/* If there are no nohz_full= CPUs, no need to track this. */
 	if (!tick_nohz_full_enabled())
 		return;
@@ -3127,11 +3134,11 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
 
 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 
-static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_enter(int irq)
 {
 }
 
-static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+static void rcu_sysidle_exit(int irq)
 {
 }
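Note on the recurring pattern above: most hunks in this diff drop an explicit cpu argument and switch from per_cpu()/per_cpu_ptr() indexing to this_cpu_ptr()/__this_cpu_read(). The rewritten functions assume they run on the CPU whose data they touch, which their callers guarantee by having preemption (or interrupts) disabled. A minimal sketch of the before/after shape; example_state, old_style, and new_style are made-up names, not identifiers from this commit:

/* One instance of the state exists for each possible CPU. */
static DEFINE_PER_CPU(struct rcu_dynticks, example_state);

/* Old style: caller passes a CPU number; any CPU's slot is reachable. */
static void old_style(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(example_state, cpu);

	/* ... use rdtp ... */
}

/* New style: no argument; the caller must keep preemption disabled so
 * the task cannot migrate off the CPU whose slot is being accessed. */
static void new_style(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&example_state);

	/* ... use rdtp ... */
}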
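Usage note for the new kthread_prio parameter: it replaces the compile-time RCU_KTHREAD_PRIO and RCU_BOOST_PRIO constants with a single runtime-settable SCHED_FIFO priority for the rcuc/rcub kthreads, defaulting to CONFIG_RCU_KTHREAD_PRIO. Since this file is built into the kernel, the parameter is set on the boot command line; assuming it takes the rcutree. prefix like the other parameters in this code, that would look like:

	rcutree.kthread_prio=2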
