author	Christoph Hellwig <hch@lst.de>	2023-12-07 08:27:02 +0100
committer	Christian Brauner <brauner@kernel.org>	2024-02-01 14:20:11 +0100
commit	cc9542534bf09f33b4da32025b31335588fcefb9
tree	f6a8f121fe0963847ca038577ab50f98da160ca2 /fs/iomap
parent	iomap: factor out a iomap_writepage_handle_eof helper
iomap: move all remaining per-folio logic into iomap_writepage_map
Move the tracepoint and the iomap check from iomap_do_writepage into
iomap_writepage_map. This keeps all logic in one place, and leaves
iomap_do_writepage just as the wrapper for the callback conventions of
write_cache_pages, which will go away when that is converted to an
iterator.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20231207072710.176093-7-hch@lst.de
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
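For reference, the write_cache_pages() callback convention that
iomap_do_writepage now merely adapts to looks like this (a minimal
sketch based on include/linux/writeback.h as of this era of the tree,
not part of the patch itself):

	/* Per-folio callback invoked by write_cache_pages(). */
	typedef int (*writepage_t)(struct folio *folio,
			struct writeback_control *wbc, void *data);

	/* Walks dirty folios in range and calls writepage on each. */
	int write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data);

Because the callback already receives the folio and an opaque data
pointer (here the struct iomap_writepage_ctx), the inode and end_pos
can be derived inside iomap_writepage_map itself, which is what lets
the wrapper below shrink to a single call.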
Diffstat (limited to 'fs/iomap')
-rw-r--r--	fs/iomap/buffered-io.c	34
1 file changed, 11 insertions(+), 23 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 75278e1b05f8..e3175d3cc036 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1832,19 +1832,25 @@ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
* At the end of a writeback pass, there will be a cached ioend remaining on the
* writepage context that the caller will need to submit.
*/
-static int
-iomap_writepage_map(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct inode *inode,
- struct folio *folio, u64 end_pos)
+static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct folio *folio)
{
struct iomap_folio_state *ifs = folio->private;
+ struct inode *inode = folio->mapping->host;
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
unsigned nblocks = i_blocks_per_folio(inode, folio);
u64 pos = folio_pos(folio);
+ u64 end_pos = pos + folio_size(folio);
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
+ trace_iomap_writepage(inode, pos, folio_size(folio));
+
+ if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
+ folio_unlock(folio);
+ return 0;
+ }
WARN_ON_ONCE(end_pos <= pos);
if (!ifs && nblocks > 1) {
@@ -1944,28 +1950,10 @@ done:
return error;
}
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page, we need to allocate space and flush it.
- * For unwritten space on the page, we need to start the conversion to
- * regular allocated space.
- */
static int iomap_do_writepage(struct folio *folio,
struct writeback_control *wbc, void *data)
{
- struct iomap_writepage_ctx *wpc = data;
- struct inode *inode = folio->mapping->host;
- u64 end_pos = folio_pos(folio) + folio_size(folio);
-
- trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
-
- if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
- folio_unlock(folio);
- return 0;
- }
-
- return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
+ return iomap_writepage_map(data, wbc, folio);
}
int
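The comment retained above iomap_writepage_map notes that a cached
ioend remains on the writepage context at the end of a writeback pass
and must be submitted by the caller. A sketch of that caller, modeled
on iomap_writepages() as it stood around this series
(iomap_submit_ioend()'s three-argument signature is an assumption
based on the pre-iterator code, not something this patch touches):

	int
	iomap_writepages(struct address_space *mapping,
			struct writeback_control *wbc,
			struct iomap_writepage_ctx *wpc,
			const struct iomap_writeback_ops *ops)
	{
		int ret;

		/* Drive the per-folio work over every dirty folio in range. */
		wpc->ops = ops;
		ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);

		/* Submit the ioend left cached on the context, if any. */
		if (!wpc->ioend)
			return ret;
		return iomap_submit_ioend(wpc, wpc->ioend, ret);
	}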