author    David Sterba <dsterba@suse.com>  2020-07-01 20:45:04 +0200
committer David Sterba <dsterba@suse.com>  2020-12-08 15:53:57 +0100
commit    ab108d992b1248adfb7c13c1136cab59c944a98c (patch)
tree      3632b276548fe4abb15b73d675bfcf2c27805b2c /fs/btrfs/scrub.c
parent    btrfs: rename page_size to io_size in submit_extent_page (diff)
btrfs: use precalculated sectorsize_bits from fs_info
We do a lot of calculations where we divide or multiply by sectorsize. We also know and make sure that sectorsize is a power of two, so this means all divisions can be turned to shifts and avoid e.g. expensive u64/u32 divisions.

The type is u32 as it's more register friendly on x86_64 compared to u8 and the resulting assembly is smaller (movzbl vs movl).

There's also superblock s_blocksize_bits but it's usually one more pointer dereference farther than fs_info.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
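For illustration only (not part of the patch), here is a minimal user-space C sketch of the equivalence the commit relies on: because sectorsize is a power of two, dividing by it and shifting right by sectorsize_bits produce the same quotient, and the shift avoids a 64-bit division. The variable names mirror the fs_info fields touched by the patch; the program itself is hypothetical.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sectorsize = 4096;	/* power of two, as btrfs enforces */
	uint32_t sectorsize_bits = 0;

	/* user-space stand-in for the kernel's ilog2(sectorsize) */
	while ((1u << sectorsize_bits) < sectorsize)
		sectorsize_bits++;
	assert((1u << sectorsize_bits) == sectorsize);

	uint64_t len = 123456789012ull;	/* arbitrary byte length */

	/* the shift computes the same sector count as the division */
	assert((len / sectorsize) == (len >> sectorsize_bits));
	printf("nsectors = %llu\n", (unsigned long long)(len >> sectorsize_bits));
	return 0;
}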
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--  fs/btrfs/scrub.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 04351b55dd14..c307f25c4846 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2300,7 +2300,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
u64 offset;
u64 nsectors64;
u32 nsectors;
- int sectorsize = sparity->sctx->fs_info->sectorsize;
+ u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
if (len >= sparity->stripe_len) {
bitmap_set(bitmap, 0, sparity->nsectors);
@@ -2309,8 +2309,8 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
start -= sparity->logic_start;
start = div64_u64_rem(start, sparity->stripe_len, &offset);
- offset = div_u64(offset, sectorsize);
- nsectors64 = div_u64(len, sectorsize);
+ offset = offset >> sectorsize_bits;
+ nsectors64 = len >> sectorsize_bits;
ASSERT(nsectors64 < UINT_MAX);
nsectors = (u32)nsectors64;
@@ -2386,10 +2386,10 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
if (!sum)
return 0;
- index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
+ index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
ASSERT(index < UINT_MAX);
- num_sectors = sum->len / sctx->fs_info->sectorsize;
+ num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
if (index == num_sectors - 1) {
list_del(&sum->list);
@@ -2776,7 +2776,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
int extent_mirror_num;
int stop_loop = 0;
- nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
+ nsectors = map->stripe_len >> fs_info->sectorsize_bits;
bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
GFP_NOFS);