author     Peter Zijlstra <peterz@infradead.org>    2024-08-14 00:25:49 +0200
committer  Peter Zijlstra <peterz@infradead.org>    2024-09-03 15:26:30 +0200
commit     7d2180d9d943d31491d77e336557f33670cfe7fd (patch)
tree       f1d8261338798f8eecf3efd9c03c3c408788326a /kernel/sched/core.c
parent     75b6499024a6c1a4ef0288f280534a5c54269076 (diff)
sched: Use set_next_task(.first) where required
Turns out the core_sched bits forgot to use the set_next_task(.first=true)
variant. Notably:

  pick_next_task() := pick_task() + set_next_task(.first = true)

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224015.614146342@infradead.org
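
For context, a minimal sketch of the two helpers involved, assuming they are
the thin wrappers around the sched_class callback kept in kernel/sched/sched.h
(not part of this diff): the only difference is the 'first' argument forwarded
to the class's ->set_next_task() hook.

	/* Sketch only; assumed wrapper definitions, not taken from this patch. */
	static inline void set_next_task(struct rq *rq, struct task_struct *next)
	{
		/* re-setting the current task; not a fresh pick */
		next->sched_class->set_next_task(rq, next, false);
	}

	static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
	{
		/* a newly picked task; lets the class do first-time setup */
		next->sched_class->set_next_task(rq, next, true);
	}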
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 016581168cb8..406b794f8423 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6010,7 +6010,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		next = rq->core_pick;
 		if (next != prev) {
 			put_prev_task(rq, prev);
-			set_next_task(rq, next);
+			set_next_task_first(rq, next);
 		}
 
 		rq->core_pick = NULL;
@@ -6184,7 +6184,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	set_next_task(rq, next);
+	set_next_task_first(rq, next);
 out:
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
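
Illustration only (the helper names below are hypothetical, not taken from this
patch): the identity pick_next_task() := pick_task() + set_next_task(.first = true)
from the changelog, written out for a single pick. The core-sched paths changed
above now follow the same pairing.

	/* Hypothetical sketch of the pick/set pairing the changelog describes. */
	static struct task_struct *
	pick_next_task_sketch(struct rq *rq, struct task_struct *prev)
	{
		struct task_struct *next = pick_task(rq);	/* class iteration elided */

		if (next != prev) {
			put_prev_task(rq, prev);
			set_next_task_first(rq, next);		/* .first = true */
		}
		return next;
	}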