author	Peter Zijlstra <peterz@infradead.org>	2023-06-09 16:58:23 +0200
committer	Ingo Molnar <mingo@kernel.org>	2023-09-13 15:01:28 +0200
commit	7a50f76674f8b6f4f30a1cec954179f10e20110c (patch)
tree	8df1d8c4adac0e9976c079a1b1dda695a5cf1962
parent	sched: Simplify sched_{set,get}affinity() (diff)
sched: Simplify yield_to()
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
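[Editor's note: the guard pattern used below comes from include/linux/cleanup.h, which builds on the compiler's __attribute__((cleanup)) so that a lock (or saved IRQ state) taken at the top of a scope is released automatically on every exit path. A minimal user-space sketch of the same idea, assuming GCC/Clang and pthreads; names such as guard_t, guard_init and do_work are illustrative, not the kernel's:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct { pthread_mutex_t *lock; } guard_t;

static guard_t guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (guard_t){ .lock = lock };
}

/* Runs automatically whenever a guard_t goes out of scope. */
static void guard_cleanup(guard_t *g)
{
	pthread_mutex_unlock(g->lock);
}

static int do_work(int arg)
{
	guard_t g __attribute__((cleanup(guard_cleanup))) = guard_init(&my_lock);

	if (arg < 0)
		return -1;	/* early return: cleanup still unlocks */

	printf("working on %d\n", arg);
	return 0;		/* normal return: same unlock path */
}

int main(void)
{
	do_work(1);
	do_work(-1);
	return 0;
}

This is what lets yield_to() below replace its out_unlock/out_irq labels with plain return statements.]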
-rw-r--r--	kernel/sched/core.c	67
1 file changed, 29 insertions(+), 38 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1d5cbb305057..6c8c40a54560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8888,55 +8888,46 @@ int __sched yield_to(struct task_struct *p, bool preempt)
 {
         struct task_struct *curr = current;
         struct rq *rq, *p_rq;
-        unsigned long flags;
         int yielded = 0;
 
-        local_irq_save(flags);
-        rq = this_rq();
+        scoped_guard (irqsave) {
+                rq = this_rq();
 
 again:
-        p_rq = task_rq(p);
-        /*
-         * If we're the only runnable task on the rq and target rq also
-         * has only one task, there's absolutely no point in yielding.
-         */
-        if (rq->nr_running == 1 && p_rq->nr_running == 1) {
-                yielded = -ESRCH;
-                goto out_irq;
-        }
+                p_rq = task_rq(p);
+                /*
+                 * If we're the only runnable task on the rq and target rq also
+                 * has only one task, there's absolutely no point in yielding.
+                 */
+                if (rq->nr_running == 1 && p_rq->nr_running == 1)
+                        return -ESRCH;
 
-        double_rq_lock(rq, p_rq);
-        if (task_rq(p) != p_rq) {
-                double_rq_unlock(rq, p_rq);
-                goto again;
-        }
+                guard(double_rq_lock)(rq, p_rq);
+                if (task_rq(p) != p_rq)
+                        goto again;
 
-        if (!curr->sched_class->yield_to_task)
-                goto out_unlock;
+                if (!curr->sched_class->yield_to_task)
+                        return 0;
 
-        if (curr->sched_class != p->sched_class)
-                goto out_unlock;
+                if (curr->sched_class != p->sched_class)
+                        return 0;
 
-        if (task_on_cpu(p_rq, p) || !task_is_running(p))
-                goto out_unlock;
+                if (task_on_cpu(p_rq, p) || !task_is_running(p))
+                        return 0;
 
-        yielded = curr->sched_class->yield_to_task(rq, p);
-        if (yielded) {
-                schedstat_inc(rq->yld_count);
-                /*
-                 * Make p's CPU reschedule; pick_next_entity takes care of
-                 * fairness.
-                 */
-                if (preempt && rq != p_rq)
-                        resched_curr(p_rq);
+                yielded = curr->sched_class->yield_to_task(rq, p);
+                if (yielded) {
+                        schedstat_inc(rq->yld_count);
+                        /*
+                         * Make p's CPU reschedule; pick_next_entity
+                         * takes care of fairness.
+                         */
+                        if (preempt && rq != p_rq)
+                                resched_curr(p_rq);
+                }
         }
 
-out_unlock:
-        double_rq_unlock(rq, p_rq);
-out_irq:
-        local_irq_restore(flags);
-
-        if (yielded > 0)
+        if (yielded)
                 schedule();
 
         return yielded;
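[Editor's note: for reference, the two guards the patch relies on are defined with the cleanup.h helper macros. The sketch below is paraphrased from my reading of include/linux/irqflags.h and kernel/sched/sched.h in trees of this vintage; treat exact locations and spelling as assumptions:

/* irqsave: local_irq_save() on entry, local_irq_restore() on any scope exit */
DEFINE_LOCK_GUARD_0(irqsave,
                    local_irq_save(_T->flags),
                    local_irq_restore(_T->flags),
                    unsigned long flags)

/* double_rq_lock: takes both runqueue locks, drops both on scope exit */
DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
                    double_rq_lock(_T->lock, _T->lock2),
                    double_rq_unlock(_T->lock, _T->lock2))

scoped_guard (irqsave) { ... } expands to a one-iteration for-loop that constructs such a guard object for the enclosed block, so the early returns inside yield_to() drop both runqueue locks and restore IRQ state without any goto bookkeeping.]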