author    Ingo Molnar <mingo@kernel.org>    2024-03-08 12:18:17 +0100
committer Ingo Molnar <mingo@kernel.org>    2024-03-12 12:00:00 +0100
commit    646ebaf51c64c6416ca89765c20041363fc1b518 (patch)
tree      7ccb0c9e054817ac0c2808c479feb0dad83fcffe /kernel/sched
parent    sched/balancing: Rename newidle_balance() => sched_balance_newidle() (diff)
sched/balancing: Rename find_idlest_group_cpu() => sched_balance_find_dst_group_cpu()
Standardize scheduler load-balancing function names on the sched_balance_() prefix.

Also use 'dst' instead of 'idlest': while historically correct, today it's not really true anymore that we return the 'idlest' group or CPU, we sort by idle-exit latency and only return the idlest CPUs from the lowest-latency set of CPUs. The true 'idlest' CPUs often remain idle for a long time and are never returned as long as the system is under-loaded.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-12-mingo@kernel.org
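For context, the selection policy the renamed function implements can be sketched as follows. This is a simplified userspace model of the behaviour the commit message describes (prefer an idle CPU whose idle state is cheapest to exit, fall back to the least-loaded CPU only when nothing is idle), not the kernel code itself; the struct cpu_stat type and find_dst_cpu() helper are made up for illustration, and the real kernel function additionally tie-breaks between equally shallow idle CPUs by the most recent idle timestamp.

    /*
     * Standalone sketch (not kernel code): prefer the idle CPU with the
     * smallest idle-state exit latency; only if no CPU in the group is
     * idle, fall back to the least-loaded one.  Types and fields here are
     * illustrative assumptions, not the scheduler's data structures.
     */
    #include <limits.h>
    #include <stdio.h>

    struct cpu_stat {
            int             cpu;            /* CPU id */
            int             idle;           /* non-zero if the CPU is idle */
            unsigned int    exit_latency;   /* idle-state exit latency (us) */
            unsigned long   load;           /* runqueue load if busy */
    };

    static int find_dst_cpu(const struct cpu_stat *cpus, int nr)
    {
            unsigned int min_exit_latency = UINT_MAX;
            unsigned long min_load = ULONG_MAX;
            int best_idle = -1, least_loaded = -1;

            for (int i = 0; i < nr; i++) {
                    if (cpus[i].idle) {
                            /* Shallowest idle state wins: cheapest to wake up. */
                            if (cpus[i].exit_latency < min_exit_latency) {
                                    min_exit_latency = cpus[i].exit_latency;
                                    best_idle = cpus[i].cpu;
                            }
                    } else if (best_idle == -1) {
                            /* Track load only while no idle CPU has been seen. */
                            if (cpus[i].load < min_load) {
                                    min_load = cpus[i].load;
                                    least_loaded = cpus[i].cpu;
                            }
                    }
            }

            return best_idle != -1 ? best_idle : least_loaded;
    }

    int main(void)
    {
            const struct cpu_stat cpus[] = {
                    { .cpu = 0, .idle = 0, .load = 512 },
                    { .cpu = 1, .idle = 1, .exit_latency = 200 },
                    { .cpu = 2, .idle = 1, .exit_latency = 10 },
            };

            /* Picks CPU 2: idle and cheapest to wake, not necessarily the
             * CPU that has been idle the longest. */
            printf("dst CPU: %d\n", find_dst_cpu(cpus, 3));
            return 0;
    }

This also illustrates why 'dst' is the more accurate name: the returned CPU is the best wake-up destination within the group, not the one that has been idle the longest.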
Diffstat (limited to 'kernel/sched')
-rw-r--r--    kernel/sched/fair.c    6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index aa5ff0efcca8..02ff0272b2e4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7101,10 +7101,10 @@ static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
/*
- * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
+ * sched_balance_find_dst_group_cpu - find the idlest CPU among the CPUs in the group.
*/
static int
-find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+sched_balance_find_dst_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
unsigned int min_exit_latency = UINT_MAX;
@@ -7191,7 +7191,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
continue;
}
- new_cpu = find_idlest_group_cpu(group, p, cpu);
+ new_cpu = sched_balance_find_dst_group_cpu(group, p, cpu);
if (new_cpu == cpu) {
/* Now try balancing at a lower domain level of 'cpu': */
sd = sd->child;