| field     | value                                                         | date                      |
|-----------|---------------------------------------------------------------|---------------------------|
| author    | Jiri Kosina <jkosina@suse.cz>                                 | 2022-03-23 09:58:40 +0100 |
| committer | Jiri Kosina <jkosina@suse.cz>                                 | 2022-03-23 09:58:40 +0100 |
| commit    | b690490d6d466972ade172ee2e7f6ffa49e7e910 (patch)              |                           |
| tree      | 50a93da28c9128e19eb7a3038aecf75dab6b36e1 /kernel/sched/core.c |                           |
| parent    | f97ec5d75e9261a5da78dc28a8955b7cc0c4468b (diff)               |                           |
| parent    | 0f203948230720e849ad50d158adac1cd32c282f (diff)               |                           |
Merge branch 'for-5.18/amd-sfh' into for-linus
- dead code elimination (Christophe JAILLET)
Diffstat (limited to 'kernel/sched/core.c')
| mode       | file                | lines changed |
|------------|---------------------|---------------|
| -rw-r--r-- | kernel/sched/core.c | 103           |

1 file changed, 74 insertions, 29 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bb5e7fd25354..2e4ae00e52d1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -144,7 +144,7 @@ static inline bool __sched_core_less(struct task_struct *a, struct task_struct *
 		return false;
 
 	/* flip prio, so high prio is leftmost */
-	if (prio_less(b, a, task_rq(a)->core->core_forceidle))
+	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
 		return true;
 
 	return false;
@@ -181,15 +181,23 @@ void sched_core_enqueue(struct rq *rq, struct task_struct *p)
 	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
 }
 
-void sched_core_dequeue(struct rq *rq, struct task_struct *p)
+void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
 {
 	rq->core->core_task_seq++;
 
-	if (!sched_core_enqueued(p))
-		return;
+	if (sched_core_enqueued(p)) {
+		rb_erase(&p->core_node, &rq->core_tree);
+		RB_CLEAR_NODE(&p->core_node);
+	}
 
-	rb_erase(&p->core_node, &rq->core_tree);
-	RB_CLEAR_NODE(&p->core_node);
+	/*
+	 * Migrating the last task off the cpu, with the cpu in forced idle
+	 * state. Reschedule to create an accounting edge for forced idle,
+	 * and re-examine whether the core is still in forced idle state.
+	 */
+	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
+	    rq->core->core_forceidle_count && rq->curr == rq->idle)
+		resched_curr(rq);
 }
 
 /*
@@ -280,6 +288,8 @@ static void __sched_core_flip(bool enabled)
 		for_each_cpu(t, smt_mask)
 			cpu_rq(t)->core_enabled = enabled;
 
+		cpu_rq(cpu)->core->core_forceidle_start = 0;
+
 		sched_core_unlock(cpu, &flags);
 
 		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
@@ -364,7 +374,8 @@ void sched_core_put(void)
 #else /* !CONFIG_SCHED_CORE */
 
 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
-static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
+static inline void
+sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
 
 #endif /* CONFIG_SCHED_CORE */
 
@@ -2005,7 +2016,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (sched_core_enabled(rq))
-		sched_core_dequeue(rq, p);
+		sched_core_dequeue(rq, p, flags);
 
 	if (!(flags & DEQUEUE_NOCLOCK))
 		update_rq_clock(rq);
@@ -2173,6 +2184,9 @@ void migrate_enable(void)
 		return;
 	}
 
+	if (WARN_ON_ONCE(!p->migration_disabled))
+		return;
+
 	/*
 	 * Ensure stop_task runs either before or after this, and that
 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
@@ -5244,6 +5258,7 @@ void scheduler_tick(void)
 	if (sched_feat(LATENCY_WARN))
 		resched_latency = cpu_resched_latency(rq);
 	calc_global_load_tick(rq);
+	sched_core_tick(rq);
 
 	rq_unlock(rq, &rf);
 
@@ -5656,6 +5671,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *next, *p, *max = NULL;
 	const struct cpumask *smt_mask;
 	bool fi_before = false;
+	bool core_clock_updated = (rq == rq->core);
 	unsigned long cookie;
 	int i, cpu, occ = 0;
 	struct rq *rq_i;
@@ -5708,10 +5724,18 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	/* reset state */
 	rq->core->core_cookie = 0UL;
-	if (rq->core->core_forceidle) {
+	if (rq->core->core_forceidle_count) {
+		if (!core_clock_updated) {
+			update_rq_clock(rq->core);
+			core_clock_updated = true;
+		}
+		sched_core_account_forceidle(rq);
+		/* reset after accounting force idle */
+		rq->core->core_forceidle_start = 0;
+		rq->core->core_forceidle_count = 0;
+		rq->core->core_forceidle_occupation = 0;
 		need_sync = true;
 		fi_before = true;
-		rq->core->core_forceidle = false;
 	}
 
 	/*
@@ -5753,7 +5777,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	for_each_cpu_wrap(i, smt_mask, cpu) {
 		rq_i = cpu_rq(i);
 
-		if (i != cpu)
+		/*
+		 * Current cpu always has its clock updated on entrance to
+		 * pick_next_task(). If the current cpu is not the core,
+		 * the core may also have been updated above.
+		 */
+		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
 			update_rq_clock(rq_i);
 
 		p = rq_i->core_pick = pick_task(rq_i);
@@ -5783,7 +5812,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		if (p == rq_i->idle) {
 			if (rq_i->nr_running) {
-				rq->core->core_forceidle = true;
+				rq->core->core_forceidle_count++;
 				if (!fi_before)
 					rq->core->core_forceidle_seq++;
 			}
@@ -5792,6 +5821,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		}
 	}
 
+	if (schedstat_enabled() && rq->core->core_forceidle_count) {
+		if (cookie)
+			rq->core->core_forceidle_start = rq_clock(rq->core);
+		rq->core->core_forceidle_occupation = occ;
+	}
+
 	rq->core->core_pick_seq = rq->core->core_task_seq;
 	next = rq->core_pick;
 	rq->core_sched_seq = rq->core->core_pick_seq;
@@ -5828,8 +5863,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		 *  1            0       1
 		 *  1            1       0
 		 */
-		if (!(fi_before && rq->core->core_forceidle))
-			task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);
+		if (!(fi_before && rq->core->core_forceidle_count))
+			task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
 
 		rq_i->core_pick->core_occupation = occ;
 
@@ -6033,11 +6068,19 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
 		goto unlock;
 
 	/* copy the shared state to the new leader */
-	core_rq->core_task_seq      = rq->core_task_seq;
-	core_rq->core_pick_seq      = rq->core_pick_seq;
-	core_rq->core_cookie        = rq->core_cookie;
-	core_rq->core_forceidle     = rq->core_forceidle;
-	core_rq->core_forceidle_seq = rq->core_forceidle_seq;
+	core_rq->core_task_seq             = rq->core_task_seq;
+	core_rq->core_pick_seq             = rq->core_pick_seq;
+	core_rq->core_cookie               = rq->core_cookie;
+	core_rq->core_forceidle_count      = rq->core_forceidle_count;
+	core_rq->core_forceidle_seq        = rq->core_forceidle_seq;
+	core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
+
+	/*
+	 * Accounting edge for forced idle is handled in pick_next_task().
+	 * Don't need another one here, since the hotplug thread shouldn't
+	 * have a cookie.
+	 */
+	core_rq->core_forceidle_start = 0;
 
 	/* install new leader */
 	for_each_cpu(t, smt_mask) {
@@ -7126,7 +7169,7 @@ unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
 
 unsigned long sched_cpu_util(int cpu, unsigned long max)
 {
-	return effective_cpu_util(cpu, cpu_util_cfs(cpu_rq(cpu)), max,
+	return effective_cpu_util(cpu, cpu_util_cfs(cpu), max,
 				  ENERGY_UTIL, NULL);
 }
 #endif /* CONFIG_SMP */
@@ -8599,14 +8642,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
 
 	__sched_fork(0, idle);
 
-	/*
-	 * The idle task doesn't need the kthread struct to function, but it
-	 * is dressed up as a per-CPU kthread and thus needs to play the part
-	 * if we want to avoid special-casing it in code that deals with per-CPU
-	 * kthreads.
-	 */
-	set_kthread_struct(idle);
-
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_rq_lock(rq);
 
@@ -9409,7 +9444,9 @@ void __init sched_init(void)
 		rq->core_pick = NULL;
 		rq->core_enabled = 0;
 		rq->core_tree = RB_ROOT;
-		rq->core_forceidle = false;
+		rq->core_forceidle_count = 0;
+		rq->core_forceidle_occupation = 0;
+		rq->core_forceidle_start = 0;
 
 		rq->core_cookie = 0UL;
 #endif
@@ -9424,6 +9461,14 @@ void __init sched_init(void)
 	enter_lazy_tlb(&init_mm, current);
 
 	/*
+	 * The idle task doesn't need the kthread struct to function, but it
+	 * is dressed up as a per-CPU kthread and thus needs to play the part
+	 * if we want to avoid special-casing it in code that deals with per-CPU
+	 * kthreads.
+	 */
+	WARN_ON(!set_kthread_struct(current));
+
+	/*
 	 * Make us the idle thread. Technically, schedule() should not be
 	 * called from this thread, however somewhere below it might be,
 	 * but because we are the idle thread, we just pick up running again
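The scheduler-side theme of the diff above is the switch from a boolean `core_forceidle` flag to a per-core `core_forceidle_count`, together with the `core_forceidle_start` timestamp and `core_forceidle_occupation` snapshot kept for schedstats. The stand-alone C sketch below only illustrates that bookkeeping pattern; the `toy_core` struct, the `now_ns()` helper and the `main()` harness are invented for illustration and are not kernel code.

```c
/*
 * Minimal user-space sketch of the forced-idle accounting pattern the
 * diff introduces: a count of forced-idle siblings, a start timestamp
 * for the current accounting edge, and an occupation snapshot.
 * The toy_core struct, now_ns() and main() are invented for this sketch;
 * only the field names mirror the kernel diff.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct toy_core {
	unsigned int	forceidle_count;	/* SMT siblings currently forced idle */
	uint64_t	forceidle_start;	/* timestamp of the open accounting edge */
	unsigned int	forceidle_occupation;	/* siblings running a cookied task */
	uint64_t	forceidle_sum;		/* accumulated forced-idle time (ns) */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Mirrors the "account, then reset" step done before a new core-wide pick. */
static void account_forceidle(struct toy_core *core)
{
	if (core->forceidle_count && core->forceidle_start) {
		uint64_t delta = now_ns() - core->forceidle_start;

		/* Charge the elapsed span once per forced-idle sibling. */
		core->forceidle_sum += delta * core->forceidle_count;
	}
	core->forceidle_start = 0;
	core->forceidle_count = 0;
	core->forceidle_occupation = 0;
}

/* Mirrors the bookkeeping after selection: open a new accounting edge. */
static void start_forceidle(struct toy_core *core, unsigned int forced_idle,
			    unsigned int occupation, bool have_cookie)
{
	core->forceidle_count = forced_idle;
	if (forced_idle && have_cookie) {
		core->forceidle_start = now_ns();
		core->forceidle_occupation = occupation;
	}
}

int main(void)
{
	struct toy_core core = { 0 };

	/* One sibling forced idle while one cookied task runs. */
	start_forceidle(&core, 1, 1, true);
	/* ... time passes; the next pick closes the accounting edge ... */
	account_forceidle(&core);

	printf("forced-idle time accumulated: %llu ns\n",
	       (unsigned long long)core.forceidle_sum);
	return 0;
}
```

In the diff itself, the account-and-reset step corresponds to the `sched_core_account_forceidle(rq)` call at the top of the core-wide pick, and the start/occupation bookkeeping to the `schedstat_enabled()` block added after task selection.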
