From b01af5c0b0414f96e6c3891e704d1c40faa18813 Mon Sep 17 00:00:00 2001
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Date: Sun, 12 Dec 2021 06:52:41 +0000
Subject: mm/slob: Remove unnecessary page_mapcount_reset() function call

After commit 401fb12c68c2 ("mm/sl*b: Differentiate struct slab fields by
sl*b implementations"), we can reorder the fields of struct slab depending
on the slab allocator.

For now, page_mapcount_reset() is called because page->_mapcount and
slab->units have the same offset. But this is not necessary for struct
slab. Use the unused field for units instead.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20211212065241.GA886691@odroid
---
 mm/slab.h | 4 ++--
 mm/slob.c | 1 -
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index f14e723b9e3c..95b9a74a2d51 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -50,8 +50,8 @@ struct slab {
 	struct list_head slab_list;
 	void *__unused_1;
 	void *freelist;		/* first free block */
-	void *__unused_2;
-	int units;
+	long units;
+	unsigned int __unused_2;
 
 #else
 #error "Unexpected slab allocator configured"
diff --git a/mm/slob.c b/mm/slob.c
index 3c6cadbbc238..60c5842215f1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -404,7 +404,6 @@ static void slob_free(void *block, int size)
 		clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
 		__folio_clear_slab(slab_folio(sp));
-		page_mapcount_reset(slab_page(sp));
 		slob_free_pages(b, 0);
 		return;
 	}
-- 
cgit v1.2.3
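
The reasoning behind the change can be illustrated with a small standalone
C program: struct slab is overlaid on struct page, so a slab field that
shares page->_mapcount's offset and is actively written must be reset to -1
(what page_mapcount_reset() does) before the page is handed back to the
buddy allocator. The sketch below uses simplified, made-up stand-ins
(fake_page, fake_slab_old, fake_slab_new) rather than the real kernel
definitions, and only checks the field offsets the commit message talks
about; it is not kernel code.

/*
 * Userspace sketch: compare the offset of the SLOB 'units' field with
 * the offset of page->_mapcount, before and after the reorder.
 * The struct layouts are simplified stand-ins, assumed here for
 * illustration only.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the head of struct page: flags, a 5-word union, _mapcount. */
struct fake_page {
	unsigned long flags;
	unsigned long five_words[5];
	int _mapcount;			/* must read -1 on a free page */
	int _refcount;
};

/* Old SLOB layout: units shared _mapcount's offset and was written to. */
struct fake_slab_old {
	unsigned long __page_flags;
	struct { void *next, *prev; } slab_list;
	void *__unused_1;
	void *freelist;
	void *__unused_2;
	int units;			/* aliases _mapcount */
};

/* New SLOB layout: units moved into the previously unused word. */
struct fake_slab_new {
	unsigned long __page_flags;
	struct { void *next, *prev; } slab_list;
	void *__unused_1;
	void *freelist;
	long units;			/* no longer aliases _mapcount */
	unsigned int __unused_2;	/* aliases _mapcount, but never written */
};

/* Before the patch, writing sp->units clobbered page->_mapcount ... */
static_assert(offsetof(struct fake_slab_old, units) ==
	      offsetof(struct fake_page, _mapcount),
	      "old units aliased _mapcount");
/* ... after it, units lives in a word _mapcount does not occupy. */
static_assert(offsetof(struct fake_slab_new, units) !=
	      offsetof(struct fake_page, _mapcount),
	      "new units does not alias _mapcount");

int main(void)
{
	printf("_mapcount offset:  %zu\n", offsetof(struct fake_page, _mapcount));
	printf("old units offset:  %zu\n", offsetof(struct fake_slab_old, units));
	printf("new units offset:  %zu\n", offsetof(struct fake_slab_new, units));
	return 0;
}

In the same spirit, mm/slab.h pins down the offsets that struct slab is
expected to share with struct page using SLAB_MATCH() static assertions,
which is what makes a reorder like this one safe to reason about.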