author	Peter Zijlstra <peterz@infradead.org>	2023-08-01 22:41:28 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2023-08-14 17:01:26 +0200
commit	6dafc713e3b0d8ffbd696d200d8c9dd212ddcdfc (patch)
tree	d407753bbe2616b85fa3344ae227fb55e05837d5 /kernel/sched/core.c
parent	sched: Simplify sched_exec() (diff)
sched: Simplify sched_tick_remote()
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lore.kernel.org/r/20230801211812.236247952@infradead.org
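The guard() used in this patch is the kernel's scope-based cleanup helper from include/linux/cleanup.h, built on the compiler's cleanup attribute: the lock taken by the guard is released automatically on every exit from the enclosing scope, which is what lets the gotos and unlock labels go away. The following is a minimal userspace sketch of that idiom, assuming GCC/Clang; the pthread-based mutex_guard type, the guard_mutex macro, and remote_tick() are illustrative names, not kernel API, and this is not the kernel macro's actual expansion.

/*
 * Sketch of the scope-guard pattern behind guard(). The mutex_guard
 * type and guard_mutex() macro are hypothetical, for illustration.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Guard object: owns the mutex for the enclosing scope. */
struct mutex_guard { pthread_mutex_t *m; };

static struct mutex_guard mutex_guard_take(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return (struct mutex_guard){ .m = m };
}

static void mutex_guard_drop(struct mutex_guard *g)
{
	pthread_mutex_unlock(g->m);
}

/* The cleanup attribute runs mutex_guard_drop() on every scope exit. */
#define guard_mutex(m) \
	struct mutex_guard __scope_guard \
	__attribute__((cleanup(mutex_guard_drop))) = mutex_guard_take(m)

static void remote_tick(int cpu_online)
{
	guard_mutex(&lock);

	if (!cpu_online)
		return;	/* early return: unlock runs automatically, no goto */

	printf("tick work under lock\n");
}	/* normal path: unlock also runs here */

int main(void)
{
	remote_tick(1);
	remote_tick(0);
	return 0;
}

With this idiom an early return replaces goto out_unlock, because the unlock is attached to the scope rather than to a label; the diff below applies the same transformation to sched_tick_remote() with guard(rq_lock_irq)(rq).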
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c | 39 ++++++++++++++++-----------------------
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cd7f2ed1377d..1b2fa91a1ef5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5721,9 +5721,6 @@ static void sched_tick_remote(struct work_struct *work)
 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
 	int cpu = twork->cpu;
 	struct rq *rq = cpu_rq(cpu);
-	struct task_struct *curr;
-	struct rq_flags rf;
-	u64 delta;
 	int os;
 
 	/*
@@ -5733,30 +5730,26 @@ static void sched_tick_remote(struct work_struct *work)
 	 * statistics and checks timeslices in a time-independent way, regardless
 	 * of when exactly it is running.
 	 */
-	if (!tick_nohz_tick_stopped_cpu(cpu))
-		goto out_requeue;
+	if (tick_nohz_tick_stopped_cpu(cpu)) {
+		guard(rq_lock_irq)(rq);
+		struct task_struct *curr = rq->curr;
 
-	rq_lock_irq(rq, &rf);
-	curr = rq->curr;
-	if (cpu_is_offline(cpu))
-		goto out_unlock;
+		if (cpu_online(cpu)) {
+			update_rq_clock(rq);
 
-	update_rq_clock(rq);
+			if (!is_idle_task(curr)) {
+				/*
+				 * Make sure the next tick runs within a
+				 * reasonable amount of time.
+				 */
+				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
+				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+			}
+			curr->sched_class->task_tick(rq, curr, 0);
 
-	if (!is_idle_task(curr)) {
-		/*
-		 * Make sure the next tick runs within a reasonable
-		 * amount of time.
-		 */
-		delta = rq_clock_task(rq) - curr->se.exec_start;
-		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+			calc_load_nohz_remote(rq);
+		}
 	}
-	curr->sched_class->task_tick(rq, curr, 0);
-
-	calc_load_nohz_remote(rq);
-out_unlock:
-	rq_unlock_irq(rq, &rf);
-out_requeue:
 	/*
 	 * Run the remote tick once per second (1Hz). This arbitrary