author	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-01-21 10:44:52 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-21 12:59:03 -0400
commit	af28a988b313a601c12c410a42e485ca46adcfee (patch)
tree	2c5e610739fccb373154c4abdd1406de7330c127
parent	mm/rmap: Turn page_referenced() into folio_referenced() (diff)
mm/huge_memory: Convert __split_huge_pmd() to take a folio
Convert split_huge_pmd_address() at the same time since it only passes
the folio through, and its two callers already have a folio on hand.
Removes numerous calls to compound_head() and removes an assumption
that a page cannot be larger than a PMD.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
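The "larger than a PMD" point shows up in the identity check inside
__split_huge_pmd() below: the old code compared page != pmd_page(*pmd),
which only works if the page of interest is exactly the PMD's head page,
while the folio comparison also holds for a folio mapped by more than one
PMD. A minimal sketch of the converted check, with a hypothetical helper
name (the real code inlines the comparison):

	/*
	 * Hypothetical helper illustrating the converted identity check.
	 * pmd_page(*pmd) may be any page within a folio spanning several
	 * PMDs; mapping it back through page_folio() makes the comparison
	 * independent of the folio's size.
	 */
	static bool pmd_maps_folio(pmd_t *pmd, struct folio *folio)
	{
		return page_folio(pmd_page(*pmd)) == folio;
	}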
-rw-r--r--	include/linux/huge_mm.h	8
-rw-r--r--	mm/huge_memory.c	46
-rw-r--r--	mm/rmap.c	6
3 files changed, 31 insertions(+), 29 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 71c073d411ac..4368b314d9c8 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -194,7 +194,7 @@ static inline int split_huge_page(struct page *page)
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page);
+ unsigned long address, bool freeze, struct folio *folio);
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
@@ -207,7 +207,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page);
+ bool freeze, struct folio *folio);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
@@ -406,9 +406,9 @@ static inline void deferred_split_huge_page(struct page *page) {}
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze, struct folio *folio) {}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
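The stubs above keep !CONFIG_TRANSPARENT_HUGEPAGE builds compiling. As for
the commit message's claim about dropping compound_head() calls: a folio by
construction refers to a head (or order-0) page, so a folio-taking function
needs no head fix-ups, and callers pay for the head lookup once via
page_folio(). Conceptually (a simplified sketch, not the real definition in
the kernel headers):

	/* Simplified sketch of the page_folio() relationship. */
	static inline struct folio *sketch_page_folio(struct page *page)
	{
		/* compound_head() maps a tail page to its head page */
		return (struct folio *)compound_head(page);
	}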
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index beebe4105659..583b735a079b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2113,11 +2113,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
}
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page)
+ unsigned long address, bool freeze, struct folio *folio)
{
spinlock_t *ptl;
struct mmu_notifier_range range;
- bool do_unlock_page = false;
+ bool do_unlock_folio = false;
pmd_t _pmd;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2127,20 +2127,20 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
ptl = pmd_lock(vma->vm_mm, pmd);
/*
- * If caller asks to setup a migration entries, we need a page to check
- * pmd against. Otherwise we can end up replacing wrong page.
+ * If the caller asks to set up a migration entry, we need a folio to
+ * check the pmd against. Otherwise we can end up replacing the wrong folio.
*/
- VM_BUG_ON(freeze && !page);
- if (page) {
- VM_WARN_ON_ONCE(!PageLocked(page));
- if (page != pmd_page(*pmd))
+ VM_BUG_ON(freeze && !folio);
+ if (folio) {
+ VM_WARN_ON_ONCE(!folio_test_locked(folio));
+ if (folio != page_folio(pmd_page(*pmd)))
goto out;
}
repeat:
if (pmd_trans_huge(*pmd)) {
- if (!page) {
- page = pmd_page(*pmd);
+ if (!folio) {
+ folio = page_folio(pmd_page(*pmd));
/*
* An anonymous page must be locked, to ensure that a
* concurrent reuse_swap_page() sees stable mapcount;
@@ -2148,22 +2148,22 @@ repeat:
* and page lock must not be taken when zap_pmd_range()
* calls __split_huge_pmd() while i_mmap_lock is held.
*/
- if (PageAnon(page)) {
- if (unlikely(!trylock_page(page))) {
- get_page(page);
+ if (folio_test_anon(folio)) {
+ if (unlikely(!folio_trylock(folio))) {
+ folio_get(folio);
_pmd = *pmd;
spin_unlock(ptl);
- lock_page(page);
+ folio_lock(folio);
spin_lock(ptl);
if (unlikely(!pmd_same(*pmd, _pmd))) {
- unlock_page(page);
- put_page(page);
- page = NULL;
+ folio_unlock(folio);
+ folio_put(folio);
+ folio = NULL;
goto repeat;
}
- put_page(page);
+ folio_put(folio);
}
- do_unlock_page = true;
+ do_unlock_folio = true;
}
}
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
@@ -2171,8 +2171,8 @@ repeat:
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
spin_unlock(ptl);
- if (do_unlock_page)
- unlock_page(page);
+ if (do_unlock_folio)
+ folio_unlock(folio);
/*
* No need to call the mmu_notifier->invalidate_range() callback twice.
* There are 3 cases to consider inside __split_huge_pmd_locked():
@@ -2190,7 +2190,7 @@ out:
}
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page)
+ bool freeze, struct folio *folio)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -2211,7 +2211,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
pmd = pmd_offset(pud, address);
- __split_huge_pmd(vma, pmd, address, freeze, page);
+ __split_huge_pmd(vma, pmd, address, freeze, folio);
}
static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
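The trylock-drop-retry sequence in the hunk above deserves a note:
folio_lock() may sleep, so it cannot be taken under the pmd spinlock. The
code instead pins the folio, drops the spinlock, sleeps on the folio lock,
retakes the spinlock, and revalidates that the pmd did not change in the
unlocked window. A standalone sketch of that idiom, as a hypothetical
helper (the real code inlines this in __split_huge_pmd()):

	/*
	 * Hypothetical extraction of the retry idiom above.  Returns the
	 * locked folio, or NULL if the pmd changed while the spinlock was
	 * dropped, in which case the caller must restart from scratch.
	 */
	static struct folio *lock_folio_under_ptl(struct folio *folio,
						  pmd_t *pmd, spinlock_t *ptl)
	{
		pmd_t snapshot;

		if (folio_trylock(folio))
			return folio;		/* fast path: no sleeping */

		folio_get(folio);	/* pin across the unlocked window */
		snapshot = *pmd;
		spin_unlock(ptl);
		folio_lock(folio);		/* may sleep */
		spin_lock(ptl);

		if (unlikely(!pmd_same(*pmd, snapshot))) {
			folio_unlock(folio);
			folio_put(folio);
			return NULL;		/* pmd changed: retry */
		}
		folio_put(folio);
		return folio;
	}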
diff --git a/mm/rmap.c b/mm/rmap.c
index 36eef54d05d8..8b3d44e56e30 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1410,6 +1410,7 @@ out:
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
+ struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
pte_t pteval;
@@ -1428,7 +1429,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
pvmw.flags = PVMW_SYNC;
if (flags & TTU_SPLIT_HUGE_PMD)
- split_huge_pmd_address(vma, address, false, page);
+ split_huge_pmd_address(vma, address, false, folio);
/*
* For THP, we have to assume the worst case, i.e. pmd, for invalidation.
@@ -1700,6 +1701,7 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
+ struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
pte_t pteval;
@@ -1722,7 +1724,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
* TTU_SPLIT_HUGE_PMD and it wants to freeze.
*/
if (flags & TTU_SPLIT_HUGE_PMD)
- split_huge_pmd_address(vma, address, true, page);
+ split_huge_pmd_address(vma, address, true, folio);
/*
* For THP, we have to assume the worst case, i.e. pmd, for invalidation.
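Reading the two converted call sites together (an observation about the
patch, not text from it): try_to_unmap_one() passes freeze == false for a
plain split to PTEs, while try_to_migrate_one() passes freeze == true so
the split leaves migration entries behind, which is why __split_huge_pmd()
insists on a folio whenever freeze is set (the VM_BUG_ON above). Condensed
into one hypothetical helper:

	/* Hypothetical condensation of the two rmap call sites above. */
	static void rmap_split_pmd(struct vm_area_struct *vma,
				   struct page *page, unsigned long address,
				   bool migrating)
	{
		struct folio *folio = page_folio(page);

		/*
		 * freeze == migrating: a frozen split installs migration
		 * entries, so the callee must verify the pmd really maps
		 * this folio before replacing it.
		 */
		split_huge_pmd_address(vma, address, migrating, folio);
	}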