From b5612c368648a7be52411b288d09593e5945d1aa Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Wed, 8 Nov 2023 20:46:05 +0000
Subject: mm: return void from folio_start_writeback() and related functions

Nobody now checks the return value from any of these functions, so add an
assertion at the beginning of the function and return void.

Link: https://lkml.kernel.org/r/20231108204605.745109-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Josef Bacik
Cc: David Howells
Cc: Steve French
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 54 +++++++++++++++++++++++++-----------------------------
 1 file changed, 25 insertions(+), 29 deletions(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ee2fd6a6af40..ca64bd513fa2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2982,67 +2982,63 @@ bool __folio_end_writeback(struct folio *folio)
 	return ret;
 }
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
 {
 	long nr = folio_nr_pages(folio);
 	struct address_space *mapping = folio_mapping(folio);
-	bool ret;
 	int access_ret;
 
+	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+
 	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
+		bool on_wblist;
 
 		xas_lock_irqsave(&xas, flags);
 		xas_load(&xas);
-		ret = folio_test_set_writeback(folio);
-		if (!ret) {
-			bool on_wblist;
+		folio_test_set_writeback(folio);
 
-			on_wblist = mapping_tagged(mapping,
-						   PAGECACHE_TAG_WRITEBACK);
+		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
 
-			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
-			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-				struct bdi_writeback *wb = inode_to_wb(inode);
-
-				wb_stat_mod(wb, WB_WRITEBACK, nr);
-				if (!on_wblist)
-					wb_inode_writeback_start(wb);
-			}
+		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
+		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+			struct bdi_writeback *wb = inode_to_wb(inode);
 
-			/*
-			 * We can come through here when swapping
-			 * anonymous folios, so we don't necessarily
-			 * have an inode to track for sync.
-			 */
-			if (mapping->host && !on_wblist)
-				sb_mark_inode_writeback(mapping->host);
+			wb_stat_mod(wb, WB_WRITEBACK, nr);
+			if (!on_wblist)
+				wb_inode_writeback_start(wb);
 		}
+
+		/*
+		 * We can come through here when swapping anonymous
+		 * folios, so we don't necessarily have an inode to
+		 * track for sync.
+		 */
+		if (mapping->host && !on_wblist)
+			sb_mark_inode_writeback(mapping->host);
 		if (!folio_test_dirty(folio))
 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		if (!keep_write)
 			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
 		xas_unlock_irqrestore(&xas, flags);
 	} else {
-		ret = folio_test_set_writeback(folio);
-	}
-	if (!ret) {
-		lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+		folio_test_set_writeback(folio);
 	}
+
+	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
 	folio_memcg_unlock(folio);
+
 	access_ret = arch_make_folio_accessible(folio);
 	/*
	 * If writeback has been triggered on a page that cannot be made
	 * accessible, it is too late to recover here.
	 */
	VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
-	return ret;
 }
 EXPORT_SYMBOL(__folio_start_writeback);
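
Note: the cgit view above is limited to mm/page-writeback.c, so the "related
functions" from the subject line (the wrappers around __folio_start_writeback())
are not shown in this diff. A minimal hand-written sketch of what the converted
wrappers look like after this change, assuming the upstream wrapper names
folio_start_writeback() and folio_start_writeback_keep_write(); the exact form
in include/linux/page-flags.h may differ:

	/*
	 * Sketch of the void wrappers: both now call
	 * __folio_start_writeback() purely for its side effects,
	 * since no caller checks a return value any more.
	 */
	static inline void folio_start_writeback(struct folio *folio)
	{
		__folio_start_writeback(folio, false);
	}

	static inline void folio_start_writeback_keep_write(struct folio *folio)
	{
		__folio_start_writeback(folio, true);
	}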
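
For context on the new VM_BUG_ON_FOLIO() at function entry: previously a
caller could test the return of folio_test_set_writeback() to detect a folio
already under writeback; that state is now a hard assertion, so callers are
expected to hold the folio lock and wait out any writeback in flight before
starting a new one. A hypothetical caller sketch (illustrative only, not code
from the tree) of the pattern the void conversion assumes:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	static void example_writeback_one(struct folio *folio)
	{
		folio_lock(folio);
		/* Guarantee the assertion cannot fire: no writeback in flight. */
		folio_wait_writeback(folio);
		folio_clear_dirty_for_io(folio);
		folio_start_writeback(folio);	/* returns void; asserts !writeback */
		folio_unlock(folio);
		/* ... submit I/O, then folio_end_writeback() on completion ... */
	}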