author     Ingo Molnar <mingo@kernel.org>  2024-03-08 12:18:14 +0100
committer  Ingo Molnar <mingo@kernel.org>  2024-03-12 12:00:00 +0100
commit     82cf921432fc184adbbb9c1bced182564876ec5e
tree       2aec44f8f0f4a8e391bc1cad69cc82c35ec44c9c /kernel/sched
parent     sched/balancing: Rename find_busiest_queue() => sched_balance_find_src_rq()
sched/balancing: Rename find_busiest_group() => sched_balance_find_src_group()
Make two naming changes:

 1) Standardize scheduler load-balancing function names on the
    sched_balance_() prefix.

 2) Similar to find_busiest_queue(), the find_busiest_group() naming
    has become a bit of a misnomer: the 'busiest' qualifier to this
    function was historically correct, but in quite a few cases the
    current code will not pick the 'busiest' group, but the best
    (possible) group we can balance from, based on a complex set of
    constraints.

So name it a bit more neutrally, similar to the 'src/dst' nomenclature
we are already using when moving tasks between runqueues, and also use
the sched_balance_ prefix: sched_balance_find_src_group().

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-9-mingo@kernel.org
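To make the 'src/dst' naming concrete, here is a minimal, self-contained C
sketch of the call pattern this series converges on: pick a source group,
then a source runqueue within it, then pull tasks toward the destination
runqueue. Only the two sched_balance_*() names and their rough signatures
come from this series; every struct layout and function body below is an
invented stand-in for illustration, not kernel code. The two call-site
hunks near the end of the patch show the first half of this flow in the
real code.

#include <stdio.h>

/* Toy stand-ins for the kernel's structures -- illustration only. */
struct rq          { int nr_running; };
struct sched_group { struct rq *rq; };
struct lb_env      { struct rq *dst_rq; struct sched_group *groups; };

/* Hypothetical stub of the renamed helper: the real implementation
 * picks the best group to balance from under a complex set of
 * constraints; here, any group with more than one task qualifies. */
static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
{
	if (env->groups && env->groups->rq->nr_running > 1)
		return env->groups;
	return NULL;	/* no imbalance in this toy model */
}

/* Hypothetical stub: the real helper picks the busiest runqueue
 * within the chosen group; this toy group has exactly one. */
static struct rq *sched_balance_find_src_rq(struct lb_env *env,
					    struct sched_group *group)
{
	(void)env;
	return group->rq;
}

int main(void)
{
	struct rq busy = { .nr_running = 3 }, idle = { .nr_running = 0 };
	struct sched_group sg = { .rq = &busy };
	struct lb_env env = { .dst_rq = &idle, .groups = &sg };

	/* The src/dst flow: source group -> source runqueue -> pull
	 * tasks toward env.dst_rq. */
	struct sched_group *group = sched_balance_find_src_group(&env);

	if (group) {
		struct rq *src_rq = sched_balance_find_src_rq(&env, group);
		printf("would pull from a src rq with %d tasks\n",
		       src_rq->nr_running);
	}
	return 0;
}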
Diffstat (limited to 'kernel/sched')
 kernel/sched/fair.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1cd9a18b35e0..96a81b2fa281 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9430,7 +9430,7 @@ static void update_blocked_averages(int cpu)
 	rq_unlock_irqrestore(rq, &rf);
 }
 
-/********** Helpers for find_busiest_group ************************/
+/********** Helpers for sched_balance_find_src_group ************************/
 
 /*
  * sg_lb_stats - stats of a sched_group required for load-balancing:
@@ -9637,7 +9637,7 @@ static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
  *
  * When this is so detected; this group becomes a candidate for busiest; see
  * update_sd_pick_busiest(). And calculate_imbalance() and
- * find_busiest_group() avoid some of the usual balance conditions to allow it
+ * sched_balance_find_src_group() avoid some of the usual balance conditions to allow it
  * to create an effective group imbalance.
  *
  * This is a somewhat tricky proposition since the next run might not find the
@@ -10788,7 +10788,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	) / SCHED_CAPACITY_SCALE;
 }
 
-/******* find_busiest_group() helpers end here *********************/
+/******* sched_balance_find_src_group() helpers end here *********************/
 
 /*
  * Decision matrix according to the local and busiest group type:
@@ -10811,7 +10811,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
  */
 
 /**
- * find_busiest_group - Returns the busiest group within the sched_domain
+ * sched_balance_find_src_group - Returns the busiest group within the sched_domain
  * if there is an imbalance.
  * @env: The load balancing environment.
  *
@@ -10820,7 +10820,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
  *
  * Return: - The busiest group if imbalance exists.
  */
-static struct sched_group *find_busiest_group(struct lb_env *env)
+static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
 {
 	struct sg_lb_stats *local, *busiest;
 	struct sd_lb_stats sds;
@@ -11274,7 +11274,7 @@ redo:
 		goto out_balanced;
 	}
 
-	group = find_busiest_group(&env);
+	group = sched_balance_find_src_group(&env);
 	if (!group) {
 		schedstat_inc(sd->lb_nobusyg[idle]);
 		goto out_balanced;
@@ -11298,7 +11298,7 @@ redo:
 	env.flags |= LBF_ALL_PINNED;
 	if (busiest->nr_running > 1) {
 		/*
-		 * Attempt to move tasks. If find_busiest_group has found
+		 * Attempt to move tasks. If sched_balance_find_src_group has found
 		 * an imbalance but busiest->nr_running <= 1, the group is
 		 * still unbalanced. ld_moved simply stays zero, so it is
 		 * correctly treated as an imbalance.