From 1fb08ac63beedf58e2ae9f229ea1f9474949a185 Mon Sep 17 00:00:00 2001
From: Yang Shi
Date: Wed, 30 Jun 2021 18:52:01 -0700
Subject: mm: rmap: make try_to_unmap() void function

Currently try_to_unmap() returns a bool value by checking
page_mapcount(), but this may yield a false positive because
page_mapcount() doesn't check all subpages of a compound page.
total_mapcount() could be used instead, but it costs more since it
traverses all subpages.

Actually, most callers of try_to_unmap() don't care about the return
value at all.  So we just need to check whether the page is still
mapped, via page_mapped(), when necessary.  And page_mapped() does
bail out early when it finds a mapped subpage.

Link: https://lkml.kernel.org/r/bb27e3fe-6036-b637-5086-272befbfe3da@google.com
Suggested-by: Hugh Dickins
Signed-off-by: Yang Shi
Acked-by: Minchan Kim
Reviewed-by: Shakeel Butt
Acked-by: Kirill A. Shutemov
Signed-off-by: Hugh Dickins
Acked-by: Naoya Horiguchi
Cc: Alistair Popple
Cc: Jan Kara
Cc: Jue Wang
Cc: "Matthew Wilcox (Oracle)"
Cc: Miaohe Lin
Cc: Oscar Salvador
Cc: Peter Xu
Cc: Ralph Campbell
Cc: Wang Yugui
Cc: Zi Yan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/rmap.h |  2 +-
 mm/memory-failure.c  | 15 +++++++--------
 mm/rmap.c            | 15 ++++-----------
 mm/vmscan.c          |  3 ++-
 4 files changed, 14 insertions(+), 21 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8d04e7deedc6..ed31a559e857 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -195,7 +195,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
 
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9d2d31ffe8a4..419d92b3225d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1269,7 +1269,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	bool unmap_success = true;
+	bool unmap_success;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 	bool mlocked = PageMlocked(hpage);
@@ -1332,7 +1332,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	if (!PageHuge(hpage)) {
-		unmap_success = try_to_unmap(hpage, ttu);
+		try_to_unmap(hpage, ttu);
 	} else {
 		if (!PageAnon(hpage)) {
 			/*
@@ -1344,17 +1344,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
-				unmap_success = try_to_unmap(hpage,
-						ttu|TTU_RMAP_LOCKED);
+				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
 				i_mmap_unlock_write(mapping);
-			} else {
+			} else
 				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
-				unmap_success = false;
-			}
 		} else {
-			unmap_success = try_to_unmap(hpage, ttu);
+			try_to_unmap(hpage, ttu);
 		}
 	}
+
+	unmap_success = !page_mapped(hpage);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
diff --git a/mm/rmap.c b/mm/rmap.c
index e05c300048e6..f9fd5bc54f0a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1405,7 +1405,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
-	 * try_to_unmap() may return false when it is about to become true,
+	 * try_to_unmap() may return before page_mapped() has become false,
 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
 	 */
 	if (flags & TTU_SYNC)
@@ -1756,9 +1756,10 @@ static int page_not_mapped(struct page *page)
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
  *
- * If unmap is successful, return true. Otherwise, false.
+ * It is the caller's responsibility to check if the page is still
+ * mapped when needed (use TTU_SYNC to prevent accounting races).
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
@@ -1783,14 +1784,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
 		rmap_walk_locked(page, &rwc);
 	else
 		rmap_walk(page, &rwc);
-
-	/*
-	 * When racing against e.g. zap_pte_range() on another cpu,
-	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
-	 * try_to_unmap() may return false when it is about to become true,
-	 * if page table locking is skipped: use TTU_SYNC to wait for that.
-	 */
-	return !page_mapcount(page);
 }
 
 /**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7b52ab166aae..e1d75e6f9ff4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 
-			if (!try_to_unmap(page, flags)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page)) {
 				stat->nr_unmap_fail += nr_pages;
 				if (!was_swapbacked && PageSwapBacked(page))
 					stat->nr_lazyfree_fail += nr_pages;
--
cgit v1.2.3
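
A minimal sketch of the calling convention this patch establishes,
modeled on the mm/vmscan.c hunk above.  The wrapper function
reclaim_unmap_page() is hypothetical; try_to_unmap(), page_mapped(),
PageTransHuge(), TTU_SPLIT_HUGE_PMD and TTU_SYNC are the kernel's own
names.  A caller that still cares about the outcome invokes the
now-void try_to_unmap() and then tests page_mapped() itself:

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Hypothetical caller, assuming this patch is applied: try_to_unmap()
 * is void, so success is judged by a separate page_mapped() check
 * afterwards.
 */
static bool reclaim_unmap_page(struct page *page, enum ttu_flags flags)
{
	/* Split a transparent huge PMD first, as shrink_page_list() does. */
	if (unlikely(PageTransHuge(page)))
		flags |= TTU_SPLIT_HUGE_PMD;

	try_to_unmap(page, flags);

	/*
	 * page_mapped() bails out as soon as it finds a mapped subpage,
	 * so it is cheaper than total_mapcount() on compound pages.
	 * Pass TTU_SYNC in flags if a racing zap_pte_range() must not
	 * leave this check reporting a transient "still mapped".
	 */
	return !page_mapped(page);
}

This mirrors the new shrink_page_list() code above, which bumps
stat->nr_unmap_fail when page_mapped() is still true after the call.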