author | Dietmar Eggemann <dietmar.eggemann@arm.com> | 2024-09-05 00:05:23 +0200 |
committer | Peter Zijlstra <peterz@infradead.org> | 2024-09-10 09:51:15 +0200 |
commit | 729288bc68560b4d5b094cb7a6f794c752ef22a2 (patch) | |
tree | cf49ebd585ea7a79885f83fa6b923a964ecd7c5c /kernel/sched/fair.c | |
parent | 6b9ccbc033cf179956a37fef3ee415bdc3029d2f (diff) |
kernel/sched: Fix util_est accounting for DELAY_DEQUEUE
Remove delayed tasks from util_est even though they are runnable.

Exclude delayed tasks which are (a) migrating between rq's or (b) in a
SAVE/RESTORE dequeue/enqueue.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/c49ef5fe-a909-43f1-b02f-a765ab9cedbf@arm.com
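As a minimal sketch (not part of the patch), the condition added in front of the util_est_enqueue()/util_est_dequeue() calls can be read as the following hypothetical helper; the patch itself open-codes it at both call sites:

/*
 * Hypothetical helper, for illustration only: a delayed-dequeue task is
 * kept out of util_est, so util_est_enqueue()/util_est_dequeue() must be
 * skipped when such a task is (a) migrating between rq's or (b) going
 * through a SAVE/RESTORE dequeue/enqueue pair.
 */
static inline bool util_est_skip_delayed(struct task_struct *p, int flags,
					 int save_restore_flag)
{
	return p->se.sched_delayed &&
	       (task_on_rq_migrating(p) || (flags & save_restore_flag));
}

The enqueue path would then call util_est_enqueue(&rq->cfs, p) only when !util_est_skip_delayed(p, flags, ENQUEUE_RESTORE), and the dequeue path likewise with DEQUEUE_SAVE, matching the open-coded checks in the hunks below.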
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 16 |
1 file changed, 9 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e946ca0b1ecd..922d69031661 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6948,18 +6948,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
 
-	if (flags & ENQUEUE_DELAYED) {
-		requeue_delayed_entity(se);
-		return;
-	}
-
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
 	 * the cfs_rq utilization to select a frequency.
	 * Let's add the task's estimated utilization to the cfs_rq's
 	 * estimated utilization, before we update schedutil.
 	 */
-	util_est_enqueue(&rq->cfs, p);
+	if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE))))
+		util_est_enqueue(&rq->cfs, p);
+
+	if (flags & ENQUEUE_DELAYED) {
+		requeue_delayed_entity(se);
+		return;
+	}
 
 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
@@ -7177,7 +7178,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
  */
 static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
-	util_est_dequeue(&rq->cfs, p);
+	if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE))))
+		util_est_dequeue(&rq->cfs, p);
 
 	if (dequeue_entities(rq, &p->se, flags) < 0) {
 		util_est_update(&rq->cfs, p, DEQUEUE_SLEEP);
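Note also the reordering in the first hunk: the ENQUEUE_DELAYED early return in enqueue_task_fair() is moved below the util_est_enqueue() call. A delayed task that is being re-enqueued because it woke up again is neither migrating nor in a RESTORE, so with the new condition its estimated utilization is added back to the cfs_rq before requeue_delayed_entity() returns early.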