author	Wei Yang <richard.weiyang@gmail.com>	2022-04-28 23:16:01 -0700
committer	akpm <akpm@linux-foundation.org>	2022-04-28 23:16:01 -0700
commit	379313241e77abc18258da1afd49d111c72c5a3d (patch)
tree	e80393dcb8fbc40dbf7aecd4bee4bd88b3532aca /mm/page_alloc.c
parent	Documentation/sysctl: document page_lock_unfairness (diff)
mm/page_alloc: adding same penalty is enough to get round-robin order
To make the node order round-robin within the same distance group, we add a penalty to the first node we pick in each round.

To get a round-robin order within the same distance group, we don't need to decrease the penalty since:

  * find_next_best_node() always iterates nodes in the same order
  * distance matters more than the penalty in find_next_best_node()
  * among nodes with the same distance, the first one would be picked up

So it is fine to add the same penalty when we get the first node in the same distance group.  Since we just add a constant of 1 to the node penalty, it is not necessary to multiply by MAX_NODE_LOAD for preference.

[richard.weiyang@gmail.com: remove MAX_NODE_LOAD, per Vlastimil]
Link: https://lkml.kernel.org/r/20220412001319.7462-1-richard.weiyang@gmail.com
Link: https://lkml.kernel.org/r/20220123013537.20491-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Oscar Salvador <osalvador@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Krupa Ramakrishnan <krupa.ramakrishnan@amd.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
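The round-robin effect of the constant penalty can be seen with a small standalone sketch (illustrative userspace C, not the kernel code; the 4-node distance table and the simplified find_next_best_node() below are made up for the example): the first node picked in a remote distance group gets +1 of load, so the next rebuild starts that group one node later.

/*
 * Toy model of the selection logic after this patch.  Distance dominates
 * the score; node_load only breaks ties among equal-distance nodes.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_NUMNODES 4

static int node_load[MAX_NUMNODES];
/* distances from local node 0; nodes 1..3 form one distance group */
static int distance[MAX_NUMNODES] = { 10, 20, 20, 20 };

static int find_next_best_node(bool *used)
{
	int best = -1, min_val = 1 << 30;

	for (int n = 0; n < MAX_NUMNODES; n++) {
		if (used[n])
			continue;
		/* distance scaled by MAX_NUMNODES, load as tiebreak */
		int val = distance[n] * MAX_NUMNODES + node_load[n];
		if (val < min_val) {
			min_val = val;
			best = n;
		}
	}
	return best;
}

int main(void)
{
	/* rebuild node 0's ordering three times */
	for (int round = 0; round < 3; round++) {
		bool used[MAX_NUMNODES] = { false };
		int prev = 0, n;

		printf("round %d:", round);
		while ((n = find_next_best_node(used)) >= 0) {
			/* first node of a new distance group gets +1 */
			if (distance[n] != distance[prev])
				node_load[n] += 1;
			printf(" %d", n);
			used[n] = true;
			prev = n;
		}
		printf("\n");
	}
	return 0;
}

With the made-up table above, the remote group is visited as 1 2 3, then 2 3 1, then 3 1 2 across successive rebuilds, which is the round-robin order the commit message describes.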
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0e42038382c1..66a5198157ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6171,7 +6171,6 @@ int numa_zonelist_order_handler(struct ctl_table *table, int write,
}
-#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];
/**
@@ -6218,7 +6217,7 @@ int find_next_best_node(int node, nodemask_t *used_node_mask)
val += PENALTY_FOR_NODE_WITH_CPUS;
/* Slight preference for less loaded node */
- val *= (MAX_NODE_LOAD*MAX_NUMNODES);
+ val *= MAX_NUMNODES;
val += node_load[n];
if (val < min_val) {
@@ -6284,13 +6283,12 @@ static void build_thisnode_zonelists(pg_data_t *pgdat)
static void build_zonelists(pg_data_t *pgdat)
{
static int node_order[MAX_NUMNODES];
- int node, load, nr_nodes = 0;
+ int node, nr_nodes = 0;
nodemask_t used_mask = NODE_MASK_NONE;
int local_node, prev_node;
/* NUMA-aware ordering of nodes */
local_node = pgdat->node_id;
- load = nr_online_nodes;
prev_node = local_node;
memset(node_order, 0, sizeof(node_order));
@@ -6302,11 +6300,10 @@ static void build_zonelists(pg_data_t *pgdat)
*/
if (node_distance(local_node, node) !=
node_distance(local_node, prev_node))
- node_load[node] += load;
+ node_load[node] += 1;
node_order[nr_nodes++] = node;
prev_node = node;
- load--;
}
build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
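A note on the scoring change (my reading of the patch, not text from the commit): with the constant +1 penalty, a given node_load[n] is bumped at most once per build_zonelists() call, and the load table is assumed to be cleared before the per-node rebuild loop, so node_load[n] stays below MAX_NUMNODES. Multiplying the distance by MAX_NUMNODES is therefore already enough to keep distance strictly dominant over the load tiebreak, which is why the extra MAX_NODE_LOAD factor can be dropped.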