Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 103
1 file changed, 78 insertions, 25 deletions
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3cde46483f0a..3fffad3bc8a8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -53,9 +53,7 @@ struct sugov_cpu {
 	unsigned int		iowait_boost_max;
 	u64			last_update;
 
-	/* The fields below are only needed when sharing a policy: */
-	unsigned long		util_cfs;
-	unsigned long		util_dl;
+	unsigned long		bw_dl;
 	unsigned long		max;
 
 	/* The field below is for single-CPU policies only: */
@@ -179,33 +177,90 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(struct sugov_cpu *sg_cpu)
+/*
+ * This function computes an effective utilization for the given CPU, to be
+ * used for frequency selection given the linear relation: f = u * f_max.
+ *
+ * The scheduler tracks the following metrics:
+ *
+ *   cpu_util_{cfs,rt,dl,irq}()
+ *   cpu_bw_dl()
+ *
+ * Where the cfs,rt and dl util numbers are tracked with the same metric and
+ * synchronized windows and are thus directly comparable.
+ *
+ * The cfs,rt,dl utilization are the running times measured with rq->clock_task
+ * which excludes things like IRQ and steal-time. These latter are then accrued
+ * in the irq utilization.
+ *
+ * The DL bandwidth number otoh is not a measured metric but a value computed
+ * based on the task model parameters and gives the minimal utilization
+ * required to meet deadlines.
+ */
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+	unsigned long util, irq, max;
 
-	sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
-	sg_cpu->util_cfs = cpu_util_cfs(rq);
-	sg_cpu->util_dl  = cpu_util_dl(rq);
-}
+	sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+	sg_cpu->bw_dl = cpu_bw_dl(rq);
 
-static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
-{
-	struct rq *rq = cpu_rq(sg_cpu->cpu);
+	if (rt_rq_is_runnable(&rq->rt))
+		return max;
 
-	if (rq->rt.rt_nr_running)
-		return sg_cpu->max;
+	/*
+	 * Early check to see if IRQ/steal time saturates the CPU, can be
+	 * because of inaccuracies in how we track these -- see
+	 * update_irq_load_avg().
+	 */
+	irq = cpu_util_irq(rq);
+	if (unlikely(irq >= max))
+		return max;
+
+	/*
+	 * Because the time spend on RT/DL tasks is visible as 'lost' time to
+	 * CFS tasks and we use the same metric to track the effective
+	 * utilization (PELT windows are synchronized) we can directly add them
+	 * to obtain the CPU's actual utilization.
+	 */
+	util = cpu_util_cfs(rq);
+	util += cpu_util_rt(rq);
+
+	/*
+	 * We do not make cpu_util_dl() a permanent part of this sum because we
+	 * want to use cpu_bw_dl() later on, but we need to check if the
+	 * CFS+RT+DL sum is saturated (ie. no idle time) such that we select
+	 * f_max when there is no idle time.
+	 *
+	 * NOTE: numerical errors or stop class might cause us to not quite hit
+	 * saturation when we should -- something for later.
+	 */
+	if ((util + cpu_util_dl(rq)) >= max)
+		return max;
+
+	/*
+	 * There is still idle time; further improve the number by using the
+	 * irq metric. Because IRQ/steal time is hidden from the task clock we
+	 * need to scale the task numbers:
+	 *
+	 *              1 - irq
+	 *   U' = irq + ------- * U
+	 *                max
+	 */
+	util = scale_irq_capacity(util, irq, max);
+	util += irq;
 
 	/*
-	 * Utilization required by DEADLINE must always be granted while, for
-	 * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
-	 * gracefully reduce the frequency when no tasks show up for longer
+	 * Bandwidth required by DEADLINE must always be granted while, for
+	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
+	 * to gracefully reduce the frequency when no tasks show up for longer
 	 * periods of time.
 	 *
-	 * Ideally we would like to set util_dl as min/guaranteed freq and
-	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
-	 * ready for such an interface. So, we only do the latter for now.
+	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
+	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
+	 * an interface. So, we only do the latter for now.
 	 */
-	return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
+	return min(max, util + sg_cpu->bw_dl);
 }
 
 /**
@@ -360,7 +415,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
  */
 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
 {
-	if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
+	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
 		sg_policy->need_freq_update = true;
 }
 
@@ -383,9 +438,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = sugov_cpu_is_busy(sg_cpu);
 
-	sugov_get_util(sg_cpu);
+	util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
-	util = sugov_aggregate_util(sg_cpu);
 	sugov_iowait_apply(sg_cpu, time, &util, &max);
 	next_f = get_next_freq(sg_policy, util, max);
 	/*
@@ -424,9 +478,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
 		unsigned long j_util, j_max;
 
-		sugov_get_util(j_sg_cpu);
+		j_util = sugov_get_util(j_sg_cpu);
 		j_max = j_sg_cpu->max;
-		j_util = sugov_aggregate_util(j_sg_cpu);
 		sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
 		if (j_util * max > j_max * util) {
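The aggregation introduced above can be followed step by step outside the kernel. The sketch below is a minimal user-space illustration, not kernel code: the helper scale_irq() assumes scale_irq_capacity() reduces to util * (max - irq) / max, all input values are made-up samples on a 0..1024 capacity scale, and the "RT class is runnable, go to f_max" early exit is omitted for brevity.

/*
 * Standalone sketch of the aggregation performed by the new sugov_get_util().
 * Inputs are hypothetical per-CPU samples; helper names are illustrative and
 * not kernel APIs.
 */
#include <stdio.h>

static unsigned long scale_irq(unsigned long util, unsigned long irq,
			       unsigned long max)
{
	/* Assumed equivalent of scale_irq_capacity(): util * (max - irq) / max */
	return util * (max - irq) / max;
}

static unsigned long effective_util(unsigned long cfs, unsigned long rt,
				    unsigned long dl, unsigned long irq,
				    unsigned long bw_dl, unsigned long max)
{
	unsigned long util;

	/* IRQ/steal time alone saturates the CPU: request f_max. */
	if (irq >= max)
		return max;

	/* CFS and RT are tracked on the same task clock, so they add directly. */
	util = cfs + rt;

	/* No idle time once DL's measured utilization is included: f_max. */
	if (util + dl >= max)
		return max;

	/* U' = irq + (max - irq) / max * U -- rescale onto the wall clock. */
	util = scale_irq(util, irq, max);
	util += irq;

	/* DL bandwidth is granted on top; clamp at the CPU's capacity. */
	util += bw_dl;
	return util < max ? util : max;
}

int main(void)
{
	/* Sample CPU: cfs=300, rt=100, dl=50, irq=64, bw_dl=128, max=1024. */
	unsigned long util = effective_util(300, 100, 50, 64, 128, 1024);

	printf("effective utilization: %lu / 1024\n", util);
	return 0;
}

With these sample values the sketch prints 567, i.e. roughly 55% of capacity; schedutil then maps that ratio onto a frequency in get_next_freq() following the f = u * f_max relation quoted in the new comment block.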
