path: root/kernel/sched/core.c
author     Mark Brown <broonie@kernel.org>   2022-02-09 14:32:59 +0000
committer  Mark Brown <broonie@kernel.org>   2022-02-09 14:32:59 +0000
commit     2cbfa2128662c6b841f68cd2fe54df199457e38a (patch)
tree       868184bca677f97fa7bb9f3dad5de2f39543d769 /kernel/sched/core.c
parent     c17756beacf4d0b21c0e877bcfe09af645129b4e (diff)
parent     a0386bba70934d42f586eaf68b21d5eeaffa7bd0 (diff)
spi: make remove callback a void function
Merge series from Uwe Kleine-König <u.kleine-koenig@pengutronix.de>: the goal of this series is to change the return type of the spi remove callback to void. After numerous earlier patches, nearly all drivers already return 0 unconditionally. The first four patches in this series convert the remaining three drivers to return 0; the final patch changes the remove prototype and converts all implementers.

base-commit: 26291c54e111ff6ba87a164d85d4a4e134b7315c
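A minimal sketch of what a converted driver looks like after the final patch of the series (the foo_* names are invented here for illustration; the point is only that .remove now returns void):

/* Hypothetical driver skeleton, not part of the series itself. */
#include <linux/module.h>
#include <linux/spi/spi.h>

static int foo_probe(struct spi_device *spi)
{
        return 0;
}

static void foo_remove(struct spi_device *spi)
{
        /* Release resources here; an error code can no longer be returned. */
}

static struct spi_driver foo_driver = {
        .driver = {
                .name = "foo",
        },
        .probe  = foo_probe,
        .remove = foo_remove,
};
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");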
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   15
1 file changed, 4 insertions, 11 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2e4ae00e52d1..848eaa0efe0e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5822,8 +5822,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
         }
         if (schedstat_enabled() && rq->core->core_forceidle_count) {
-                if (cookie)
-                        rq->core->core_forceidle_start = rq_clock(rq->core);
+                rq->core->core_forceidle_start = rq_clock(rq->core);
                 rq->core->core_forceidle_occupation = occ;
         }
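For reference, the block reads as follows once this hunk is applied; the comment is added here for explanation and is not part of the kernel source:

        if (schedstat_enabled() && rq->core->core_forceidle_count) {
                /*
                 * The start timestamp was previously taken only when the
                 * picked task carried a core-scheduling cookie; it is now
                 * recorded whenever force-idle accounting is active.
                 */
                rq->core->core_forceidle_start = rq_clock(rq->core);
                rq->core->core_forceidle_occupation = occ;
        }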
@@ -8219,9 +8218,7 @@ int __cond_resched_lock(spinlock_t *lock)
         if (spin_needbreak(lock) || resched) {
                 spin_unlock(lock);
-                if (resched)
-                        preempt_schedule_common();
-                else
+                if (!_cond_resched())
                         cpu_relax();
                 ret = 1;
                 spin_lock(lock);
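Taken together with its context lines, __cond_resched_lock() ends up following the pattern below (a sketch reconstructed from the hunk, assuming the surrounding lines are unchanged from mainline; the two rwlock variants in the next hunks differ only in the lock type):

int __cond_resched_lock(spinlock_t *lock)
{
        int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;

        lockdep_assert_held(lock);

        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                /* _cond_resched() reports whether it actually rescheduled;
                 * only spin the CPU when no reschedule happened. */
                if (!_cond_resched())
                        cpu_relax();
                ret = 1;
                spin_lock(lock);
        }
        return ret;
}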
@@ -8239,9 +8236,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)
         if (rwlock_needbreak(lock) || resched) {
                 read_unlock(lock);
-                if (resched)
-                        preempt_schedule_common();
-                else
+                if (!_cond_resched())
                         cpu_relax();
                 ret = 1;
                 read_lock(lock);
@@ -8259,9 +8254,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
         if (rwlock_needbreak(lock) || resched) {
                 write_unlock(lock);
-                if (resched)
-                        preempt_schedule_common();
-                else
+                if (!_cond_resched())
                         cpu_relax();
                 ret = 1;
                 write_lock(lock);
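All three hunks rely on the same contract: _cond_resched() returns nonzero only when it actually rescheduled, so cpu_relax() runs only when nothing was done. A simplified model of that contract is sketched below; the name __cond_resched_model is invented here, and the kernel's real helper additionally has to cover the dynamic-preemption configurations:

/* Simplified model only, not the kernel's __cond_resched(). */
static int __cond_resched_model(void)
{
        if (should_resched(0)) {
                preempt_schedule_common();      /* gave up the CPU */
                return 1;
        }
        return 0;                               /* no reschedule was needed */
}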