path: root/mm/page_alloc.c
author		Mauro Carvalho Chehab <mchehab+samsung@kernel.org>	2018-12-05 13:16:24 -0500
committer	Mauro Carvalho Chehab <mchehab+samsung@kernel.org>	2018-12-05 13:23:38 -0500
commit		14a4467a0a5eacb2ebbe3aab1b4e25af3519c76a (patch)
tree		541647056606d3e0aa1e06e62d2b40417623f8cb /mm/page_alloc.c
parent		media: pixfmt-meta-d4xx.rst: Add a license to it (diff)
parent		Merge tag 'media/v4.20-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mc... (diff)
Merge commit '0072a0c14d5b7cb72c611d396f143f5dcd73ebe2' into patchwork
Merge from Upstream after the latest media fixes branch, because we need one patch that is there.

* commit '0072a0c14d5b7cb72c611d396f143f5dcd73ebe2': (1108 commits)
  ide: Change to use DEFINE_SHOW_ATTRIBUTE macro
  ide: pmac: add of_node_put()
  drivers/tty: add missing of_node_put()
  drivers/sbus/char: add of_node_put()
  sbus: char: add of_node_put()
  Linux 4.20-rc5
  PCI: Fix incorrect value returned from pcie_get_speed_cap()
  MAINTAINERS: Update linux-mips mailing list address
  ocfs2: fix potential use after free
  mm/khugepaged: fix the xas_create_range() error path
  mm/khugepaged: collapse_shmem() do not crash on Compound
  mm/khugepaged: collapse_shmem() without freezing new_page
  mm/khugepaged: minor reorderings in collapse_shmem()
  mm/khugepaged: collapse_shmem() remember to clear holes
  mm/khugepaged: fix crashes due to misaccounted holes
  mm/khugepaged: collapse_shmem() stop if punched or truncated
  mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
  mm/huge_memory: splitting set mapping+index before unfreeze
  mm/huge_memory: rename freeze_page() to unmap_page()
  initramfs: clean old path before creating a hardlink
  ...
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..2ec9cc407216 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4061,17 +4061,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
int reserve_flags;
/*
- * In the slowpath, we sanity check order to avoid ever trying to
- * reclaim >= MAX_ORDER areas which will never succeed. Callers may
- * be using allocators in order of preference for an area that is
- * too large.
- */
- if (order >= MAX_ORDER) {
- WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
- return NULL;
- }
-
- /*
* We also sanity check to catch abuse of atomic reserves being used by
* callers that are not in atomic context.
*/
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
+ /*
+ * There are several places where we assume that the order value is sane
+ * so bail out early if the request is out of bound.
+ */
+ if (unlikely(order >= MAX_ORDER)) {
+ WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+ return NULL;
+ }
+
gfp_mask &= gfp_allowed_mask;
alloc_mask = gfp_mask;
if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
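With this hunk the MAX_ORDER sanity check runs once at the allocator entry point instead of only in the slowpath, so every out-of-range request fails up front. A minimal, hypothetical caller sketch (not part of the patch) of the resulting behaviour:

	/* Hypothetical caller sketch, not part of this diff: order >= MAX_ORDER
	 * is now rejected in __alloc_pages_nodemask() before any work is done. */
	struct page *page;

	page = alloc_pages(GFP_KERNEL, MAX_ORDER);	/* fails, WARN_ON_ONCE() fires */
	if (!page)
		pr_debug("rejected: order out of range\n");

	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, MAX_ORDER);	/* fails silently */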
@@ -5815,8 +5813,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
+ int zone_idx = zone_idx(zone) + 1;
- pgdat->nr_zones = zone_idx(zone) + 1;
+ if (zone_idx > pgdat->nr_zones)
+ pgdat->nr_zones = zone_idx;
zone->zone_start_pfn = zone_start_pfn;
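The hunk above only lets nr_zones grow, so initializing a lower zone after a higher one no longer shrinks the recorded count. A hedged illustration with hypothetical zone indexes (not code from the patch):

	/* Hypothetical illustration, not patch code: assume ZONE_NORMAL has
	 * index 2 and ZONE_MOVABLE has index 3 on this configuration. */
	int nr_zones = 0;

	nr_zones = max(nr_zones, 3 + 1);	/* ZONE_MOVABLE initialized first: 4 */
	nr_zones = max(nr_zones, 2 + 1);	/* ZONE_NORMAL later: stays 4; the old
						 * unconditional assignment would have
						 * dropped it to 3 */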
@@ -7789,6 +7789,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
goto unmovable;
/*
+ * If the zone is movable and we have ruled out all reserved
+ * pages then it should be reasonably safe to assume the rest
+ * is movable.
+ */
+ if (zone_idx(zone) == ZONE_MOVABLE)
+ continue;
+
+ /*
* Hugepages are not in LRU lists, but they're movable.
* We need not scan over tail pages because we don't
* handle each tail page individually in migration.