author    Eric Biggers <ebiggers@google.com>    2023-01-27 14:15:29 -0800
committer Eric Biggers <ebiggers@google.com>    2023-01-27 14:46:31 -0800
commit    5d0f0e57ed900917836385527ce5b122fa1425a3
tree      2a9dd0ba1351b2783a4febe87e98754e6b8472fe
parent    fsverity.rst: update git repo URL for fsverity-utils
fsverity: support verifying data from large folios

Try to make fs/verity/verify.c aware of large folios.  This includes
making fsverity_verify_bio() support the case where the bio contains
large folios, and adding a function fsverity_verify_folio() which is
the equivalent of fsverity_verify_page().

There's no way to actually test this with large folios yet, but I've
tested that this doesn't cause any regressions.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20230127221529.299560-1-ebiggers@kernel.org
-rw-r--r--  Documentation/filesystems/fsverity.rst  | 20
-rw-r--r--  fs/buffer.c                             |  3
-rw-r--r--  fs/verity/verify.c                      | 43
-rw-r--r--  include/linux/fsverity.h                | 15
4 files changed, 44 insertions(+), 37 deletions(-)
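
For orientation, the new fsverity_verify_folio() helper added by this commit
is intended to be called from a filesystem's read path while the folio is
still locked and not yet Uptodate.  The fragment below is a minimal sketch of
such a caller, not part of this patch; myfs_read_folio() and myfs_fill_folio()
are hypothetical names used only for illustration:

	/*
	 * Illustrative sketch only: a simplified ->read_folio() that
	 * verifies a just-read folio before marking it Uptodate.
	 * myfs_fill_folio() is a hypothetical helper that reads the
	 * folio's contents from disk.
	 */
	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		struct inode *inode = folio->mapping->host;
		int err;

		err = myfs_fill_folio(inode, folio);

		/*
		 * The folio is still locked and not Uptodate here, so
		 * userspace cannot observe unverified data.
		 */
		if (!err && fsverity_active(inode) &&
		    !fsverity_verify_folio(folio))
			err = -EIO;

		if (!err)
			folio_mark_uptodate(folio);
		folio_unlock(folio);
		return err;
	}
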
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index 2d9ef906aa2a..ede672dedf11 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -568,22 +568,22 @@ Pagecache
~~~~~~~~~
For filesystems using Linux's pagecache, the ``->read_folio()`` and
-``->readahead()`` methods must be modified to verify pages before they
-are marked Uptodate. Merely hooking ``->read_iter()`` would be
+``->readahead()`` methods must be modified to verify folios before
+they are marked Uptodate. Merely hooking ``->read_iter()`` would be
insufficient, since ``->read_iter()`` is not used for memory maps.
Therefore, fs/verity/ provides the function fsverity_verify_blocks()
which verifies data that has been read into the pagecache of a verity
-inode. The containing page must still be locked and not Uptodate, so
+inode. The containing folio must still be locked and not Uptodate, so
it's not yet readable by userspace. As needed to do the verification,
fsverity_verify_blocks() will call back into the filesystem to read
hash blocks via fsverity_operations::read_merkle_tree_page().
fsverity_verify_blocks() returns false if verification failed; in this
-case, the filesystem must not set the page Uptodate. Following this,
+case, the filesystem must not set the folio Uptodate. Following this,
as per the usual Linux pagecache behavior, attempts by userspace to
-read() from the part of the file containing the page will fail with
-EIO, and accesses to the page within a memory map will raise SIGBUS.
+read() from the part of the file containing the folio will fail with
+EIO, and accesses to the folio within a memory map will raise SIGBUS.
In principle, verifying a data block requires verifying the entire
path in the Merkle tree from the data block to the root hash.
@@ -624,8 +624,8 @@ each bio and store it in ``->bi_private``::
verity, or both is enabled. After the bio completes, for each needed
postprocessing step the filesystem enqueues the bio_post_read_ctx on a
workqueue, and then the workqueue work does the decryption or
-verification. Finally, pages where no decryption or verity error
-occurred are marked Uptodate, and the pages are unlocked.
+verification. Finally, folios where no decryption or verity error
+occurred are marked Uptodate, and the folios are unlocked.
On many filesystems, files can contain holes. Normally,
``->readahead()`` simply zeroes hole blocks and considers the
@@ -791,9 +791,9 @@ weren't already directly answered in other parts of this document.
:A: There are many reasons why this is not possible or would be very
difficult, including the following:
- - To prevent bypassing verification, pages must not be marked
+ - To prevent bypassing verification, folios must not be marked
Uptodate until they've been verified. Currently, each
- filesystem is responsible for marking pages Uptodate via
+ filesystem is responsible for marking folios Uptodate via
``->readahead()``. Therefore, currently it's not possible for
the VFS to do the verification on its own. Changing this would
require significant changes to the VFS and all filesystems.
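
The workqueue-based post-read flow described in the documentation changes
above can be sketched roughly as follows.  struct bio_post_read_ctx is the
name used in the documentation text; myfs_read_end_io() and myfs_finish_read()
are hypothetical placeholders for the filesystem's own completion code, and
real filesystems (ext4, f2fs) differ in the details:

	/* Per-bio context for deferred postprocessing, as described above. */
	struct bio_post_read_ctx {
		struct bio *bio;
		struct work_struct work;
	};

	static void verity_work(struct work_struct *work)
	{
		struct bio_post_read_ctx *ctx =
			container_of(work, struct bio_post_read_ctx, work);
		struct bio *bio = ctx->bio;

		/* Sets bio->bi_status to an error if verification fails. */
		fsverity_verify_bio(bio);

		/*
		 * Hypothetical completion: marks only error-free folios
		 * Uptodate and unlocks them, per the text above.
		 */
		myfs_finish_read(bio);
		kfree(ctx);
	}

	static void myfs_read_end_io(struct bio *bio)
	{
		struct bio_post_read_ctx *ctx = bio->bi_private;

		if (ctx && !bio->bi_status) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
		} else {
			myfs_finish_read(bio);
			kfree(ctx);
		}
	}
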
diff --git a/fs/buffer.c b/fs/buffer.c
index 2e65ba2b3919..8499c79ae13d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -308,7 +308,8 @@ static void verify_bh(struct work_struct *work)
struct buffer_head *bh = ctx->bh;
bool valid;
- valid = fsverity_verify_blocks(bh->b_page, bh->b_size, bh_offset(bh));
+ valid = fsverity_verify_blocks(page_folio(bh->b_page), bh->b_size,
+ bh_offset(bh));
end_buffer_async_read(bh, valid);
kfree(ctx);
}
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index e59ef9d0e21c..f50e3b5b52c9 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -266,20 +266,23 @@ out:
static bool
verify_data_blocks(struct inode *inode, struct fsverity_info *vi,
- struct ahash_request *req, struct page *data_page,
- unsigned int len, unsigned int offset,
- unsigned long max_ra_pages)
+ struct ahash_request *req, struct folio *data_folio,
+ size_t len, size_t offset, unsigned long max_ra_pages)
{
const unsigned int block_size = vi->tree_params.block_size;
- u64 pos = (u64)data_page->index << PAGE_SHIFT;
+ u64 pos = (u64)data_folio->index << PAGE_SHIFT;
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
return false;
- if (WARN_ON_ONCE(!PageLocked(data_page) || PageUptodate(data_page)))
+ if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
+ folio_test_uptodate(data_folio)))
return false;
do {
- if (!verify_data_block(inode, vi, req, data_page,
- pos + offset, offset, max_ra_pages))
+ struct page *data_page =
+ folio_page(data_folio, offset >> PAGE_SHIFT);
+
+ if (!verify_data_block(inode, vi, req, data_page, pos + offset,
+ offset & ~PAGE_MASK, max_ra_pages))
return false;
offset += block_size;
len -= block_size;
@@ -288,21 +291,20 @@ verify_data_blocks(struct inode *inode, struct fsverity_info *vi,
}
/**
- * fsverity_verify_blocks() - verify data in a page
- * @page: the page containing the data to verify
- * @len: the length of the data to verify in the page
- * @offset: the offset of the data to verify in the page
+ * fsverity_verify_blocks() - verify data in a folio
+ * @folio: the folio containing the data to verify
+ * @len: the length of the data to verify in the folio
+ * @offset: the offset of the data to verify in the folio
*
* Verify data that has just been read from a verity file. The data must be
- * located in a pagecache page that is still locked and not yet uptodate. The
+ * located in a pagecache folio that is still locked and not yet uptodate. The
* length and offset of the data must be Merkle tree block size aligned.
*
* Return: %true if the data is valid, else %false.
*/
-bool fsverity_verify_blocks(struct page *page, unsigned int len,
- unsigned int offset)
+bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct fsverity_info *vi = inode->i_verity_info;
struct ahash_request *req;
bool valid;
@@ -310,7 +312,7 @@ bool fsverity_verify_blocks(struct page *page, unsigned int len,
/* This allocation never fails, since it's mempool-backed. */
req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
- valid = verify_data_blocks(inode, vi, req, page, len, offset, 0);
+ valid = verify_data_blocks(inode, vi, req, folio, len, offset, 0);
fsverity_free_hash_request(vi->tree_params.hash_alg, req);
@@ -338,8 +340,7 @@ void fsverity_verify_bio(struct bio *bio)
struct inode *inode = bio_first_page_all(bio)->mapping->host;
struct fsverity_info *vi = inode->i_verity_info;
struct ahash_request *req;
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
unsigned long max_ra_pages = 0;
/* This allocation never fails, since it's mempool-backed. */
@@ -358,9 +359,9 @@ void fsverity_verify_bio(struct bio *bio)
max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
}
- bio_for_each_segment_all(bv, bio, iter_all) {
- if (!verify_data_blocks(inode, vi, req, bv->bv_page, bv->bv_len,
- bv->bv_offset, max_ra_pages)) {
+ bio_for_each_folio_all(fi, bio) {
+ if (!verify_data_blocks(inode, vi, req, fi.folio, fi.length,
+ fi.offset, max_ra_pages)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
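
For clarity, the offset arithmetic above in verify_data_blocks() maps a byte
offset within a (possibly large) folio to a constituent page plus an in-page
offset.  The fragment below is a worked example only, assuming a folio
variable in scope, 4 KiB pages (PAGE_SHIFT == 12), and a 1 KiB Merkle tree
block located at byte offset 5120 within the folio; the values are chosen
purely for illustration:

	size_t offset = 5120;
	/* 5120 >> 12 == 1, so this is the folio's second page. */
	struct page *p = folio_page(folio, offset >> PAGE_SHIFT);
	/* 5120 & ~PAGE_MASK == 1024 bytes into that page. */
	size_t off_in_page = offset & ~PAGE_MASK;
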
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 991a44458996..119a3266791f 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -12,6 +12,7 @@
#define _LINUX_FSVERITY_H
#include <linux/fs.h>
+#include <linux/mm.h>
#include <crypto/hash_info.h>
#include <crypto/sha2.h>
#include <uapi/linux/fsverity.h>
@@ -169,8 +170,7 @@ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-bool fsverity_verify_blocks(struct page *page, unsigned int len,
- unsigned int offset);
+bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
void fsverity_verify_bio(struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
@@ -230,8 +230,8 @@ static inline int fsverity_ioctl_read_metadata(struct file *filp,
/* verify.c */
-static inline bool fsverity_verify_blocks(struct page *page, unsigned int len,
- unsigned int offset)
+static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+ size_t offset)
{
WARN_ON(1);
return false;
@@ -249,9 +249,14 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
#endif /* !CONFIG_FS_VERITY */
+static inline bool fsverity_verify_folio(struct folio *folio)
+{
+ return fsverity_verify_blocks(folio, folio_size(folio), 0);
+}
+
static inline bool fsverity_verify_page(struct page *page)
{
- return fsverity_verify_blocks(page, PAGE_SIZE, 0);
+ return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
}
/**