Diffstat (limited to 'block')
-rw-r--r--  block/bdev.c             |  97
-rw-r--r--  block/bio-integrity.c    |  26
-rw-r--r--  block/blk-cgroup.c       |  87
-rw-r--r--  block/blk-core.c         |  30
-rw-r--r--  block/blk-flush.c        |   3
-rw-r--r--  block/blk-mq.c           |  22
-rw-r--r--  block/blk-settings.c     |  10
-rw-r--r--  block/blk-stat.h         |   1
-rw-r--r--  block/blk-throttle.c     |  29
-rw-r--r--  block/blk-throttle.h     |   8
-rw-r--r--  block/blk-zoned.c        |  58
-rw-r--r--  block/blk.h              |   2
-rw-r--r--  block/early-lookup.c     |   2
-rw-r--r--  block/fops.c             |   4
-rw-r--r--  block/genhd.c            |  23
-rw-r--r--  block/ioctl.c            |  40
-rw-r--r--  block/partitions/core.c  |  20
-rw-r--r--  block/partitions/ldm.c   |   6
-rw-r--r--  block/sed-opal.c         |   2
-rw-r--r--  block/t10-pi.c           |   2
20 files changed, 310 insertions, 162 deletions
diff --git a/block/bdev.c b/block/bdev.c
index 2af3dca56f3d..353677ac49b3 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -43,6 +43,11 @@ static inline struct bdev_inode *BDEV_I(struct inode *inode)
return container_of(inode, struct bdev_inode, vfs_inode);
}
+static inline struct inode *BD_INODE(struct block_device *bdev)
+{
+ return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
+}
+
struct block_device *I_BDEV(struct inode *inode)
{
return &BDEV_I(inode)->bdev;
@@ -57,7 +62,7 @@ EXPORT_SYMBOL(file_bdev);
static void bdev_write_inode(struct block_device *bdev)
{
- struct inode *inode = bdev->bd_inode;
+ struct inode *inode = BD_INODE(bdev);
int ret;
spin_lock(&inode->i_lock);
@@ -76,7 +81,7 @@ static void bdev_write_inode(struct block_device *bdev)
/* Kill _all_ buffers and pagecache , dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
if (mapping_empty(mapping))
return;
@@ -88,7 +93,7 @@ static void kill_bdev(struct block_device *bdev)
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct address_space *mapping = bdev->bd_mapping;
if (mapping->nrpages) {
invalidate_bh_lrus();
@@ -116,7 +121,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
goto invalidate;
}
- truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+ truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
@@ -126,7 +131,7 @@ invalidate:
* Someone else has handle exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
- return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+ return invalidate_inode_pages2_range(bdev->bd_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}
@@ -134,18 +139,21 @@ invalidate:
static void set_init_blocksize(struct block_device *bdev)
{
unsigned int bsize = bdev_logical_block_size(bdev);
- loff_t size = i_size_read(bdev->bd_inode);
+ loff_t size = i_size_read(BD_INODE(bdev));
while (bsize < PAGE_SIZE) {
if (size & bsize)
break;
bsize <<= 1;
}
- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
}
-int set_blocksize(struct block_device *bdev, int size)
+int set_blocksize(struct file *file, int size)
{
+ struct inode *inode = file->f_mapping->host;
+ struct block_device *bdev = I_BDEV(inode);
+
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
@@ -154,10 +162,13 @@ int set_blocksize(struct block_device *bdev, int size)
if (size < bdev_logical_block_size(bdev))
return -EINVAL;
+ if (!file->private_data)
+ return -EINVAL;
+
/* Don't change the size if it is same as current */
- if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
+ if (inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
- bdev->bd_inode->i_blkbits = blksize_bits(size);
+ inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
return 0;
@@ -167,7 +178,7 @@ EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
- if (set_blocksize(sb->s_bdev, size))
+ if (set_blocksize(sb->s_bdev_file, size))
return 0;
/* If we get here, we know size is power of two
* and it's value is between 512 and PAGE_SIZE */
@@ -192,7 +203,7 @@ int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_flush(bdev->bd_inode->i_mapping);
+ return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
@@ -204,13 +215,13 @@ int sync_blockdev(struct block_device *bdev)
{
if (!bdev)
return 0;
- return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+ return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
- return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+ return filemap_write_and_wait_range(bdev->bd_mapping,
lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
@@ -411,13 +422,11 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
mutex_init(&bdev->bd_fsfreeze_mutex);
spin_lock_init(&bdev->bd_size_lock);
mutex_init(&bdev->bd_holder_lock);
- bdev->bd_partno = partno;
- bdev->bd_inode = inode;
+ atomic_set(&bdev->__bd_flags, partno);
+ bdev->bd_mapping = &inode->i_data;
bdev->bd_queue = disk->queue;
- if (partno)
- bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
- else
- bdev->bd_has_submit_bio = false;
+ if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO))
+ bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO);
bdev->bd_stats = alloc_percpu(struct disk_stats);
if (!bdev->bd_stats) {
iput(inode);
@@ -430,19 +439,30 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
spin_lock(&bdev->bd_size_lock);
- i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
+ i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
bdev->bd_nr_sectors = sectors;
spin_unlock(&bdev->bd_size_lock);
}
void bdev_add(struct block_device *bdev, dev_t dev)
{
+ struct inode *inode = BD_INODE(bdev);
if (bdev_stable_writes(bdev))
- mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+ mapping_set_stable_writes(bdev->bd_mapping);
bdev->bd_dev = dev;
- bdev->bd_inode->i_rdev = dev;
- bdev->bd_inode->i_ino = dev;
- insert_inode_hash(bdev->bd_inode);
+ inode->i_rdev = dev;
+ inode->i_ino = dev;
+ insert_inode_hash(inode);
+}
+
+void bdev_unhash(struct block_device *bdev)
+{
+ remove_inode_hash(BD_INODE(bdev));
+}
+
+void bdev_drop(struct block_device *bdev)
+{
+ iput(BD_INODE(bdev));
}
long nr_blockdev_pages(void)
@@ -620,7 +640,7 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
bdev->bd_holder = NULL;
bdev->bd_holder_ops = NULL;
mutex_unlock(&bdev->bd_holder_lock);
- if (bdev->bd_write_holder)
+ if (bdev_test_flag(bdev, BD_WRITE_HOLDER))
unblock = true;
}
if (!whole->bd_holders)
@@ -633,7 +653,7 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
*/
if (unblock) {
disk_unblock_events(bdev->bd_disk);
- bdev->bd_write_holder = false;
+ bdev_clear_flag(bdev, BD_WRITE_HOLDER);
}
}
@@ -900,9 +920,10 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
* writeable reference is too fragile given the way @mode is
* used in blkdev_get/put().
*/
- if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
+ if ((mode & BLK_OPEN_WRITE) &&
+ !bdev_test_flag(bdev, BD_WRITE_HOLDER) &&
(disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
- bdev->bd_write_holder = true;
+ bdev_set_flag(bdev, BD_WRITE_HOLDER);
unblock_events = false;
}
}
@@ -917,7 +938,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
bdev_file->f_mode |= FMODE_NOWAIT;
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
- bdev_file->f_mapping = bdev->bd_inode->i_mapping;
+ bdev_file->f_mapping = bdev->bd_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
@@ -979,13 +1000,13 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
return ERR_PTR(-ENXIO);
flags = blk_to_file_flags(mode);
- bdev_file = alloc_file_pseudo_noaccount(bdev->bd_inode,
+ bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
if (IS_ERR(bdev_file)) {
blkdev_put_no_open(bdev);
return bdev_file;
}
- ihold(bdev->bd_inode);
+ ihold(BD_INODE(bdev));
ret = bdev_open(bdev, mode, holder, hops, bdev_file);
if (ret) {
@@ -1260,6 +1281,18 @@ void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
blkdev_put_no_open(bdev);
}
+bool disk_live(struct gendisk *disk)
+{
+ return !inode_unhashed(BD_INODE(disk->part0));
+}
+EXPORT_SYMBOL_GPL(disk_live);
+
+unsigned int block_size(struct block_device *bdev)
+{
+ return 1 << BD_INODE(bdev)->i_blkbits;
+}
+EXPORT_SYMBOL_GPL(block_size);
+
static int __init setup_bdev_allow_write_mounted(char *str)
{
if (kstrtobool(str, &bdev_allow_write_mounted))
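The bd_partno, bd_has_submit_bio and bd_write_holder bools are folded into the single
atomic __bd_flags word initialized in bdev_alloc() above. A minimal sketch of what the
accessors used throughout this series could look like, assuming __bd_flags keeps the
partition number in its low byte and the BD_* flag bits above it (the authoritative
definitions live in the bdev headers and may differ):

static inline bool bdev_test_flag(const struct block_device *bdev,
				  unsigned int flag)
{
	return atomic_read(&bdev->__bd_flags) & flag;
}

static inline void bdev_set_flag(struct block_device *bdev, unsigned int flag)
{
	atomic_or(flag, &bdev->__bd_flags);
}

static inline void bdev_clear_flag(struct block_device *bdev, unsigned int flag)
{
	atomic_andnot(flag, &bdev->__bd_flags);
}

static inline u8 bdev_partno(const struct block_device *bdev)
{
	/* assumes the partition number occupies the low byte */
	return atomic_read(&bdev->__bd_flags) & 0xff;
}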
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 2e3e8e04961e..8b528e12136f 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -144,10 +144,10 @@ void bio_integrity_free(struct bio *bio)
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_set *bs = bio->bi_pool;
+ if (bip->bip_flags & BIP_INTEGRITY_USER)
+ return;
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
kfree(bvec_virt(bip->bip_vec));
- else if (bip->bip_flags & BIP_INTEGRITY_USER)
- bio_integrity_unmap_user(bip);
__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
@@ -155,6 +155,28 @@ void bio_integrity_free(struct bio *bio)
}
/**
+ * bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
+ * @bio: bio containing bip to be unmapped and freed
+ *
+ * Description: Used to unmap and free the user-mapped integrity portion of a
+ * bio. The submitter that attached the user integrity buffer is responsible
+ * for unmapping and freeing it during completion.
+ */
+void bio_integrity_unmap_free_user(struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_set *bs = bio->bi_pool;
+
+ if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
+ return;
+ bio_integrity_unmap_user(bip);
+ __bio_integrity_free(bs, bip);
+ bio->bi_integrity = NULL;
+ bio->bi_opf &= ~REQ_INTEGRITY;
+}
+EXPORT_SYMBOL(bio_integrity_unmap_free_user);
+
+/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
* @page: page containing integrity metadata
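Since bio_integrity_free() no longer unmaps user payloads, a completion path that
mapped user integrity data must call the new export itself. A hedged sketch of such
a caller; the surrounding function is hypothetical, only bio_integrity_unmap_free_user(),
blk_rq_unmap_user() and blk_mq_free_request() come from the kernel API:

static void example_passthrough_end_io(struct request *req)
{
	struct bio *bio = req->bio;

	/* The submitter mapped the user integrity buffer, so the
	 * submitter unmaps and frees it at completion time. */
	if (bio_integrity(bio))
		bio_integrity_unmap_free_user(bio);
	blk_rq_unmap_user(bio);
	blk_mq_free_request(req);
}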
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b1a35ab0ea4..37e6cc91d576 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -322,6 +322,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
blkg->q = disk->queue;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
+ blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios);
@@ -618,12 +619,45 @@ restart:
spin_unlock_irq(&q->queue_lock);
}
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] = src->bytes[i];
+ dst->ios[i] = src->ios[i];
+ }
+}
+
+static void __blkg_clear_stat(struct blkg_iostat_set *bis)
+{
+ struct blkg_iostat cur = {0};
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&bis->sync);
+ blkg_iostat_set(&bis->cur, &cur);
+ blkg_iostat_set(&bis->last, &cur);
+ u64_stats_update_end_irqrestore(&bis->sync, flags);
+}
+
+static void blkg_clear_stat(struct blkcg_gq *blkg)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
+
+ __blkg_clear_stat(s);
+ }
+ __blkg_clear_stat(&blkg->iostat);
+}
+
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 val)
{
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
- int i, cpu;
+ int i;
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -634,18 +668,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- for_each_possible_cpu(cpu) {
- struct blkg_iostat_set *bis =
- per_cpu_ptr(blkg->iostat_cpu, cpu);
- memset(bis, 0, sizeof(*bis));
-
- /* Re-initialize the cleared blkg_iostat_set */
- u64_stats_init(&bis->sync);
- bis->blkg = blkg;
- }
- memset(&blkg->iostat, 0, sizeof(blkg->iostat));
- u64_stats_init(&blkg->iostat.sync);
-
+ blkg_clear_stat(blkg);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -948,16 +971,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
-{
- int i;
-
- for (i = 0; i < BLKG_IOSTAT_NR; i++) {
- dst->bytes[i] = src->bytes[i];
- dst->ios[i] = src->ios[i];
- }
-}
-
static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
@@ -1023,7 +1036,19 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
struct blkg_iostat cur;
unsigned int seq;
+ /*
+ * Order the read of `next_bisc` from `bisc->lnode.next` in
+ * llist_for_each_entry_safe against the clearing of `bisc->lqueued`,
+ * so that a re-ordering cannot let `next_bisc` pick up a new next
+ * pointer added by blk_cgroup_bio_start().
+ *
+ * The pairing barrier is implied by llist_add() in blk_cgroup_bio_start().
+ */
+ smp_mb();
+
WRITE_ONCE(bisc->lqueued, false);
+ if (bisc == &blkg->iostat)
+ goto propagate_up; /* propagate up to parent only */
/* fetch the current per-cpu values */
do {
@@ -1033,10 +1058,24 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
blkcg_iostat_update(blkg, &cur, &bisc->last);
+propagate_up:
/* propagate global delta to parent (unless that's root) */
- if (parent && parent->parent)
+ if (parent && parent->parent) {
blkcg_iostat_update(parent, &blkg->iostat.cur,
&blkg->iostat.last);
+ /*
+ * Queue parent->iostat to its blkcg's lockless
+ * list to propagate up to the grandparent if the
+ * iostat hasn't been queued yet.
+ */
+ if (!parent->iostat.lqueued) {
+ struct llist_head *plhead;
+
+ plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
+ llist_add(&parent->iostat.lnode, plhead);
+ parent->iostat.lqueued = true;
+ }
+ }
}
raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
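Reduced to its two halves, the lockless-list handshake the new barrier protects looks
roughly like this (a sketch of the pattern, not the full functions):

/* Producer side (blk_cgroup_bio_start): queue each iostat_set once. */
if (!READ_ONCE(bis->lqueued)) {
	llist_add(&bis->lnode, this_cpu_ptr(blkcg->lhead)); /* full barrier */
	WRITE_ONCE(bis->lqueued, true);
}

/* Consumer side (__blkcg_rstat_flush): the smp_mb() keeps the read of
 * bisc->lnode.next (done by llist_for_each_entry_safe) ahead of the
 * write clearing lqueued, so a concurrent producer cannot slip a new
 * next pointer in between. */
llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
	smp_mb();
	WRITE_ONCE(bisc->lqueued, false);
	/* ... flush and propagate bisc ... */
}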
diff --git a/block/blk-core.c b/block/blk-core.c
index 01186333c88e..82c3ae22d76d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -496,7 +496,8 @@ __setup("fail_make_request=", setup_fail_make_request);
bool should_fail_request(struct block_device *part, unsigned int bytes)
{
- return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
+ return bdev_test_flag(part, BD_MAKE_IT_FAIL) &&
+ should_fail(&fail_make_request, bytes);
}
static int __init fail_make_request_debugfs(void)
@@ -516,10 +517,11 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
- if (bio->bi_bdev->bd_ro_warned)
+ if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
return;
- bio->bi_bdev->bd_ro_warned = true;
+ bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);
+
/*
* Use ioctl to set underlying disk of raid/dm to read-only
* will trigger this.
@@ -613,10 +615,15 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
static void __submit_bio(struct bio *bio)
{
+ /* If a plug is not already in use, add one here to cache the nsecs time. */
+ struct blk_plug plug;
+
if (unlikely(!blk_crypto_bio_prep(&bio)))
return;
- if (!bio->bi_bdev->bd_has_submit_bio) {
+ blk_start_plug(&plug);
+
+ if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
blk_mq_submit_bio(bio);
} else if (likely(bio_queue_enter(bio) == 0)) {
struct gendisk *disk = bio->bi_bdev->bd_disk;
@@ -624,6 +631,8 @@ static void __submit_bio(struct bio *bio)
disk->fops->submit_bio(bio);
blk_queue_exit(disk->queue);
}
+
+ blk_finish_plug(&plug);
}
/*
@@ -648,13 +657,11 @@ static void __submit_bio(struct bio *bio)
static void __submit_bio_noacct(struct bio *bio)
{
struct bio_list bio_list_on_stack[2];
- struct blk_plug plug;
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
- blk_start_plug(&plug);
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -688,23 +695,19 @@ static void __submit_bio_noacct(struct bio *bio)
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} while ((bio = bio_list_pop(&bio_list_on_stack[0])));
- blk_finish_plug(&plug);
current->bio_list = NULL;
}
static void __submit_bio_noacct_mq(struct bio *bio)
{
struct bio_list bio_list[2] = { };
- struct blk_plug plug;
current->bio_list = bio_list;
- blk_start_plug(&plug);
do {
__submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0])));
- blk_finish_plug(&plug);
current->bio_list = NULL;
}
@@ -730,7 +733,7 @@ void submit_bio_noacct_nocheck(struct bio *bio)
*/
if (current->bio_list)
bio_list_add(&current->bio_list[0], bio);
- else if (!bio->bi_bdev->bd_has_submit_bio)
+ else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
__submit_bio_noacct_mq(bio);
else
__submit_bio_noacct(bio);
@@ -766,7 +769,8 @@ void submit_bio_noacct(struct bio *bio)
if (!bio_flagged(bio, BIO_REMAPPED)) {
if (unlikely(bio_check_eod(bio)))
goto end_io;
- if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
+ if (bdev_is_partition(bdev) &&
+ unlikely(blk_partition_remap(bio)))
goto end_io;
}
@@ -991,7 +995,7 @@ again:
(end || part_in_flight(part)))
__part_stat_add(part, io_ticks, now - stamp);
- if (part->bd_partno) {
+ if (bdev_is_partition(part)) {
part = bdev_whole(part);
goto again;
}
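With the plug moved into __submit_bio(), every bio submission, batched or not, runs
under an on-stack plug, so the cached time stamp and request batching apply uniformly.
The plug API itself is unchanged; for reference, the usual caller pattern is:

struct blk_plug plug;

blk_start_plug(&plug);
/* bios submitted here are batched and can reuse the plug's cached
 * ktime instead of reading the clock once per bio */
submit_bio(bio);
blk_finish_plug(&plug);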
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c17cf8ed8113..cca4f9131f79 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -185,7 +185,7 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
- list_move_tail(&rq->queuelist, pending);
+ list_add_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
@@ -263,6 +263,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}
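The switch works because flush_end_io() now removes the request from the flush queue
itself, so blk_flush_complete_seq() only needs the plain list_add_tail();
list_move_tail() is simply the deletion plus the add (sketch, paraphrasing
include/linux/list.h):

static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del_entry(list);		/* what list_del_init() already did */
	list_add_tail(list, head);
}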
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8e01e4b32e10..3b4df8e5ac9e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -93,7 +93,7 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv)
struct mq_inflight *mi = priv;
if (rq->part && blk_do_io_stat(rq) &&
- (!mi->part->bd_partno || rq->part == mi->part) &&
+ (!bdev_is_partition(mi->part) || rq->part == mi->part) &&
blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
mi->inflight[rq_data_dir(rq)]++;
@@ -3545,12 +3545,28 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
+/*
+ * Check if a CPU is mapped to the specified hctx.
+ *
+ * Isolated CPUs have been ruled out of hctx->cpumask, which is supposed
+ * to be used only for scheduling kworkers. For any other usage, call this
+ * helper to check whether a CPU belongs to the specified hctx.
+ */
+static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
+ const struct blk_mq_hw_ctx *hctx)
+{
+ struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue,
+ hctx->type, cpu);
+
+ return mapped_hctx == hctx;
+}
+
static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
{
struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
struct blk_mq_hw_ctx, cpuhp_online);
- if (cpumask_test_cpu(cpu, hctx->cpumask))
+ if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
return 0;
}
@@ -3568,7 +3584,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
enum hctx_type type;
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
- if (!cpumask_test_cpu(cpu, hctx->cpumask))
+ if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
return 0;
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a7fe8e90240a..effeb9a639bb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -104,6 +104,7 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
static int blk_validate_limits(struct queue_limits *lim)
{
unsigned int max_hw_sectors;
+ unsigned int logical_block_sectors;
/*
* Unless otherwise specified, default to 512 byte logical blocks and a
@@ -134,8 +135,11 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
return -EINVAL;
+ logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
+ if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
+ return -EINVAL;
lim->max_hw_sectors = round_down(lim->max_hw_sectors,
- lim->logical_block_size >> SECTOR_SHIFT);
+ logical_block_sectors);
/*
* The actual max_sectors value is a complex beast and also takes the
@@ -153,7 +157,7 @@ static int blk_validate_limits(struct queue_limits *lim)
lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
}
lim->max_sectors = round_down(lim->max_sectors,
- lim->logical_block_size >> SECTOR_SHIFT);
+ logical_block_sectors);
/*
* Random default for the maximum number of segments. Driver should not
@@ -611,6 +615,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_user_sectors = min_not_zero(t->max_user_sectors,
+ b->max_user_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
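The new WARN_ON_ONCE() in blk_validate_limits() rejects devices whose logical block
size exceeds their maximum transfer size; without it, the round_down() would silently
produce a zero max_hw_sectors. A worked example with illustrative numbers:

/* Illustrative: 4096-byte logical blocks are 8 sectors. */
lim->logical_block_size = 4096;
lim->max_hw_sectors = 4;		/* bogus: less than one block */

logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT; /* = 8 */
/* Previously: round_down(4, 8) == 0, leaving a queue that cannot do
 * any I/O. Now blk_validate_limits() returns -EINVAL up front. */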
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17e1eb4ec7e2..5d7f18ba436d 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -64,7 +64,6 @@ struct blk_stat_callback {
struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
-bool blk_stats_alloc_enable(struct request_queue *q);
void blk_stat_add(struct request *rq, u64 now);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 80aaca18bfb0..c1bf73f8c75d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -39,11 +39,6 @@ struct latency_bucket {
int samples;
};
-struct avg_latency_bucket {
- unsigned long latency; /* ns / 1024 */
- bool valid;
-};
-
struct throtl_data
{
/* service tree for active throtl groups */
@@ -1404,32 +1399,32 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
bps_dft = U64_MAX;
iops_dft = UINT_MAX;
- if (tg->bps_conf[READ] == bps_dft &&
- tg->bps_conf[WRITE] == bps_dft &&
- tg->iops_conf[READ] == iops_dft &&
- tg->iops_conf[WRITE] == iops_dft)
+ if (tg->bps[READ] == bps_dft &&
+ tg->bps[WRITE] == bps_dft &&
+ tg->iops[READ] == iops_dft &&
+ tg->iops[WRITE] == iops_dft)
return 0;
seq_printf(sf, "%s", dname);
- if (tg->bps_conf[READ] == U64_MAX)
+ if (tg->bps[READ] == U64_MAX)
seq_printf(sf, " rbps=max");
else
- seq_printf(sf, " rbps=%llu", tg->bps_conf[READ]);
+ seq_printf(sf, " rbps=%llu", tg->bps[READ]);
- if (tg->bps_conf[WRITE] == U64_MAX)
+ if (tg->bps[WRITE] == U64_MAX)
seq_printf(sf, " wbps=max");
else
- seq_printf(sf, " wbps=%llu", tg->bps_conf[WRITE]);
+ seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
- if (tg->iops_conf[READ] == UINT_MAX)
+ if (tg->iops[READ] == UINT_MAX)
seq_printf(sf, " riops=max");
else
- seq_printf(sf, " riops=%u", tg->iops_conf[READ]);
+ seq_printf(sf, " riops=%u", tg->iops[READ]);
- if (tg->iops_conf[WRITE] == UINT_MAX)
+ if (tg->iops[WRITE] == UINT_MAX)
seq_printf(sf, " wiops=max");
else
- seq_printf(sf, " wiops=%u", tg->iops_conf[WRITE]);
+ seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
seq_printf(sf, "\n");
return 0;
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 393c3d134b96..4d9ef5abdf21 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -95,15 +95,11 @@ struct throtl_grp {
bool has_rules_bps[2];
bool has_rules_iops[2];
- /* internally used bytes per second rate limits */
+ /* bytes per second rate limits */
uint64_t bps[2];
- /* user configured bps limits */
- uint64_t bps_conf[2];
- /* internally used IOPS limits */
+ /* IOPS limits */
unsigned int iops[2];
- /* user configured IOPS limits */
- unsigned int iops_conf[2];
/* Number of bytes dispatched in current slice */
uint64_t bytes_disp[2];
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 48e5e3bbb89c..08d7dfe8bd93 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -416,7 +416,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
goto fail;
@@ -438,7 +438,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
fail:
if (cmd == BLKRESETZONE)
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return ret;
}
@@ -450,6 +450,25 @@ static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
+static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
+{
+ return zone->start + zone->len >= get_capacity(disk);
+}
+
+static bool disk_zone_is_full(struct gendisk *disk,
+ unsigned int zno, unsigned int offset_in_zone)
+{
+ if (zno < disk->nr_zones - 1)
+ return offset_in_zone >= disk->zone_capacity;
+ return offset_in_zone >= disk->last_zone_capacity;
+}
+
+static bool disk_zone_wplug_is_full(struct gendisk *disk,
+ struct blk_zone_wplug *zwplug)
+{
+ return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset);
+}
+
static bool disk_insert_zone_wplug(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
@@ -543,7 +562,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
return false;
/* We can remove zone write plugs for zones that are empty or full. */
- return !zwplug->wp_offset || zwplug->wp_offset >= disk->zone_capacity;
+ return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug);
}
static void disk_remove_zone_wplug(struct gendisk *disk,
@@ -664,13 +683,12 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug)
static void disk_zone_wplug_abort_unaligned(struct gendisk *disk,
struct blk_zone_wplug *zwplug)
{
- unsigned int zone_capacity = disk->zone_capacity;
unsigned int wp_offset = zwplug->wp_offset;
struct bio_list bl = BIO_EMPTY_LIST;
struct bio *bio;
while ((bio = bio_list_pop(&zwplug->bio_list))) {
- if (wp_offset >= zone_capacity ||
+ if (disk_zone_is_full(disk, zwplug->zone_no, wp_offset) ||
(bio_op(bio) != REQ_OP_ZONE_APPEND &&
bio_offset_from_zone_start(bio) != wp_offset)) {
blk_zone_wplug_bio_io_error(zwplug, bio);
@@ -909,7 +927,6 @@ void blk_zone_write_plug_init_request(struct request *req)
sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req);
struct request_queue *q = req->q;
struct gendisk *disk = q->disk;
- unsigned int zone_capacity = disk->zone_capacity;
struct blk_zone_wplug *zwplug =
disk_get_zone_wplug(disk, blk_rq_pos(req));
unsigned long flags;
@@ -933,7 +950,7 @@ void blk_zone_write_plug_init_request(struct request *req)
* into the back of the request.
*/
spin_lock_irqsave(&zwplug->lock, flags);
- while (zwplug->wp_offset < zone_capacity) {
+ while (!disk_zone_wplug_is_full(disk, zwplug)) {
bio = bio_list_peek(&zwplug->bio_list);
if (!bio)
break;
@@ -979,7 +996,7 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug,
* We know such BIO will fail, and that would potentially overflow our
* write pointer offset beyond the end of the zone.
*/
- if (zwplug->wp_offset >= disk->zone_capacity)
+ if (disk_zone_wplug_is_full(disk, zwplug))
goto err;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
@@ -1257,7 +1274,7 @@ void blk_zone_write_plug_bio_endio(struct bio *bio)
* is not called. So we need to schedule execution of the next
* plugged BIO here.
*/
- if (bio->bi_bdev->bd_has_submit_bio)
+ if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO))
disk_zone_wplug_unplug_bio(disk, zwplug);
/* Drop the reference we took when entering this function. */
@@ -1326,7 +1343,7 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
* path for BIO-based devices will not do that. So drop this extra
* reference here.
*/
- if (bdev->bd_has_submit_bio)
+ if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO))
blk_queue_exit(bdev->bd_disk->queue);
put_zwplug:
@@ -1535,6 +1552,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
void disk_free_zone_resources(struct gendisk *disk)
{
+ if (!disk->zone_wplugs_pool)
+ return;
+
cancel_work_sync(&disk->zone_wplugs_work);
if (disk->zone_wplugs_wq) {
@@ -1556,6 +1576,7 @@ void disk_free_zone_resources(struct gendisk *disk)
kfree(disk->conv_zones_bitmap);
disk->conv_zones_bitmap = NULL;
disk->zone_capacity = 0;
+ disk->last_zone_capacity = 0;
disk->nr_zones = 0;
}
@@ -1600,6 +1621,7 @@ struct blk_revalidate_zone_args {
unsigned long *conv_zones_bitmap;
unsigned int nr_zones;
unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
sector_t sector;
};
@@ -1617,6 +1639,7 @@ static int disk_update_zone_resources(struct gendisk *disk,
disk->nr_zones = args->nr_zones;
disk->zone_capacity = args->zone_capacity;
+ disk->last_zone_capacity = args->last_zone_capacity;
swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
if (disk->conv_zones_bitmap)
nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
@@ -1668,6 +1691,9 @@ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
+ if (disk_zone_is_last(disk, zone))
+ args->last_zone_capacity = zone->capacity;
+
if (!disk_need_zone_resources(disk))
return 0;
@@ -1693,11 +1719,14 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
/*
* Remember the capacity of the first sequential zone and check
- * if it is constant for all zones.
+ * if it is constant for all zones, ignoring the last zone as it can be
+ * smaller.
*/
if (!args->zone_capacity)
args->zone_capacity = zone->capacity;
- if (zone->capacity != args->zone_capacity) {
+ if (disk_zone_is_last(disk, zone)) {
+ args->last_zone_capacity = zone->capacity;
+ } else if (zone->capacity != args->zone_capacity) {
pr_warn("%s: Invalid variable zone capacity\n",
disk->disk_name);
return -ENODEV;
@@ -1732,7 +1761,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
{
struct blk_revalidate_zone_args *args = data;
struct gendisk *disk = args->disk;
- sector_t capacity = get_capacity(disk);
sector_t zone_sectors = disk->queue->limits.chunk_sectors;
int ret;
@@ -1743,7 +1771,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
return -ENODEV;
}
- if (zone->start >= capacity || !zone->len) {
+ if (zone->start >= get_capacity(disk) || !zone->len) {
pr_warn("%s: Invalid zone start %llu, length %llu\n",
disk->disk_name, zone->start, zone->len);
return -ENODEV;
@@ -1753,7 +1781,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
* All zones must have the same size, with the exception on an eventual
* smaller last zone.
*/
- if (zone->start + zone->len < capacity) {
+ if (!disk_zone_is_last(disk, zone)) {
if (zone->len != zone_sectors) {
pr_warn("%s: Invalid zoned device with non constant zone size\n",
disk->disk_name);
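The new last_zone_capacity handling exists because a device whose capacity is not a
multiple of the zone size ends in a smaller zone. A worked example, with illustrative
numbers and zone capacity taken equal to zone size:

/* Illustrative: 1000-sector zones on a 10500-sector disk.
 *
 *   get_capacity(disk)        = 10500
 *   disk->zone_capacity       = 1000   (zones 0..9)
 *   disk->last_zone_capacity  = 500    (zone 10, the runt)
 *
 * disk_zone_is_full(disk, 10, 500) is now true; checking wp_offset
 * against zone_capacity alone would never see zone 10 as full, so its
 * zone write plug could never be removed. */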
diff --git a/block/blk.h b/block/blk.h
index 6e94c10af798..189bc25beb50 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -499,6 +499,8 @@ static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
+void bdev_unhash(struct block_device *bdev);
+void bdev_drop(struct block_device *bdev);
int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
diff --git a/block/early-lookup.c b/block/early-lookup.c
index 3effbd0d35e9..3fb57f7d2b12 100644
--- a/block/early-lookup.c
+++ b/block/early-lookup.c
@@ -78,7 +78,7 @@ static int __init devt_from_partuuid(const char *uuid_str, dev_t *devt)
* to the partition number found by UUID.
*/
*devt = part_devt(dev_to_disk(dev),
- dev_to_bdev(dev)->bd_partno + offset);
+ bdev_partno(dev_to_bdev(dev)) + offset);
} else {
*devt = dev->devt;
}
diff --git a/block/fops.c b/block/fops.c
index 7a163f7fe2d8..376265935714 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -663,8 +663,8 @@ static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct block_device *bdev = I_BDEV(file->f_mapping->host);
- struct inode *bd_inode = bdev->bd_inode;
+ struct inode *bd_inode = bdev_file_inode(file);
+ struct block_device *bdev = I_BDEV(bd_inode);
loff_t size = bdev_nr_bytes(bdev);
size_t shorted = 0;
ssize_t ret;
diff --git a/block/genhd.c b/block/genhd.c
index 7f39fbe60753..8f1f3c6b4d67 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -411,7 +411,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
elevator_init_mq(disk->queue);
/* Mark bdev as having a submit_bio, if needed */
- disk->part0->bd_has_submit_bio = disk->fops->submit_bio != NULL;
+ if (disk->fops->submit_bio)
+ bdev_set_flag(disk->part0, BD_HAS_SUBMIT_BIO);
/*
* If the driver provides an explicit major number it also must provide
@@ -653,7 +654,7 @@ void del_gendisk(struct gendisk *disk)
*/
mutex_lock(&disk->open_mutex);
xa_for_each(&disk->part_tbl, idx, part)
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
mutex_unlock(&disk->open_mutex);
/*
@@ -742,7 +743,7 @@ void invalidate_disk(struct gendisk *disk)
struct block_device *bdev = disk->part0;
invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
+ bdev->bd_mapping->wb_err = 0;
set_capacity(disk, 0);
}
EXPORT_SYMBOL(invalidate_disk);
@@ -1064,7 +1065,8 @@ static DEVICE_ATTR(partscan, 0444, partscan_show, NULL);
ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
+ return sprintf(buf, "%d\n",
+ bdev_test_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL));
}
ssize_t part_fail_store(struct device *dev,
@@ -1073,9 +1075,12 @@ ssize_t part_fail_store(struct device *dev,
{
int i;
- if (count > 0 && sscanf(buf, "%d", &i) > 0)
- dev_to_bdev(dev)->bd_make_it_fail = i;
-
+ if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+ if (i)
+ bdev_set_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL);
+ else
+ bdev_clear_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL);
+ }
return count;
}
@@ -1191,7 +1196,7 @@ static void disk_release(struct device *dev)
if (test_bit(GD_ADDED, &disk->state) && disk->fops->free_disk)
disk->fops->free_disk(disk);
- iput(disk->part0->bd_inode); /* frees the disk */
+ bdev_drop(disk->part0); /* frees the disk */
}
static int block_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -1379,7 +1384,7 @@ out_erase_part0:
out_destroy_part_tbl:
xa_destroy(&disk->part_tbl);
disk->part0->bd_disk = NULL;
- iput(disk->part0->bd_inode);
+ bdev_drop(disk->part0);
out_free_bdi:
bdi_put(disk->bdi);
out_free_bioset:
diff --git a/block/ioctl.c b/block/ioctl.c
index c7db3bd2d653..d570e1695896 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -96,7 +96,6 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
- struct inode *inode = bdev->bd_inode;
uint64_t range[2], start, len, end;
struct bio *prev = NULL, *bio;
sector_t sector, nr_sects;
@@ -126,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (err)
goto fail;
@@ -157,7 +156,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
out_unplug:
blk_finish_plug(&plug);
fail:
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -182,12 +181,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
if (start + len > bdev_nr_bytes(bdev))
return -EINVAL;
- filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
- filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -197,7 +196,6 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
{
uint64_t range[2];
uint64_t start, end, len;
- struct inode *inode = bdev->bd_inode;
int err;
if (!(mode & BLK_OPEN_WRITE))
@@ -220,7 +218,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
- filemap_invalidate_lock(inode->i_mapping);
+ filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
goto fail;
@@ -229,7 +227,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
BLKDEV_ZERO_NOUNMAP);
fail:
- filemap_invalidate_unlock(inode->i_mapping);
+ filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}
@@ -433,7 +431,10 @@ static int blkdev_roset(struct block_device *bdev, unsigned cmd,
if (ret)
return ret;
}
- bdev->bd_read_only = n;
+ if (n)
+ bdev_set_flag(bdev, BD_READ_ONLY);
+ else
+ bdev_clear_flag(bdev, BD_READ_ONLY);
return 0;
}
@@ -503,11 +504,14 @@ static int compat_hdio_getgeo(struct block_device *bdev,
#endif
/* set the logical block size */
-static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
+static int blkdev_bszset(struct file *file, blk_mode_t mode,
int __user *argp)
{
+ // This one might be file_inode(file)->i_rdev - a rare valid
+ // use of file_inode() for block device files.
+ dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
+ struct file *excl_file;
int ret, n;
- struct file *file;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -517,13 +521,13 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
return -EFAULT;
if (mode & BLK_OPEN_EXCL)
- return set_blocksize(bdev, n);
+ return set_blocksize(file, n);
- file = bdev_file_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
- if (IS_ERR(file))
+ excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
+ if (IS_ERR(excl_file))
return -EBUSY;
- ret = set_blocksize(bdev, n);
- fput(file);
+ ret = set_blocksize(excl_file, n);
+ fput(excl_file);
return ret;
}
@@ -652,7 +656,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
return put_int(argp, block_size(bdev));
case BLKBSZSET:
- return blkdev_bszset(bdev, mode, argp);
+ return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64:
return put_u64(argp, bdev_nr_bytes(bdev));
@@ -712,7 +716,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return put_int(argp, bdev_logical_block_size(bdev));
case BLKBSZSET_32:
- return blkdev_bszset(bdev, mode, argp);
+ return blkdev_bszset(file, mode, argp);
case BLKGETSIZE64_32:
return put_u64(argp, bdev_nr_bytes(bdev));
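For callers outside the ioctl path, set_blocksize() now wants the opened block device
file rather than the bdev. A hedged sketch of how a hypothetical in-kernel user might
look after this change (dev and holder are assumed to be set up elsewhere):

/* Hypothetical caller: open the device, then set the size via the file. */
struct file *bdev_file;
int ret;

bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  holder, NULL);
if (IS_ERR(bdev_file))
	return PTR_ERR(bdev_file);

ret = set_blocksize(bdev_file, 4096);	/* power of two, >= logical block */
fput(bdev_file);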
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 37b5f92d07fe..ab76e64f0f6c 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -173,7 +173,7 @@ static struct parsed_partitions *check_partition(struct gendisk *hd)
static ssize_t part_partition_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno);
+ return sprintf(buf, "%d\n", bdev_partno(dev_to_bdev(dev)));
}
static ssize_t part_start_show(struct device *dev,
@@ -243,14 +243,14 @@ static const struct attribute_group *part_attr_groups[] = {
static void part_release(struct device *dev)
{
put_disk(dev_to_bdev(dev)->bd_disk);
- iput(dev_to_bdev(dev)->bd_inode);
+ bdev_drop(dev_to_bdev(dev));
}
static int part_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct block_device *part = dev_to_bdev(dev);
- add_uevent_var(env, "PARTN=%u", part->bd_partno);
+ add_uevent_var(env, "PARTN=%u", bdev_partno(part));
if (part->bd_meta_info && part->bd_meta_info->volname[0])
add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname);
return 0;
@@ -267,7 +267,7 @@ void drop_partition(struct block_device *part)
{
lockdep_assert_held(&part->bd_disk->open_mutex);
- xa_erase(&part->bd_disk->part_tbl, part->bd_partno);
+ xa_erase(&part->bd_disk->part_tbl, bdev_partno(part));
kobject_put(part->bd_holder_dir);
device_del(&part->bd_device);
@@ -338,8 +338,8 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
pdev->parent = ddev;
/* in consecutive minor range? */
- if (bdev->bd_partno < disk->minors) {
- devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
+ if (bdev_partno(bdev) < disk->minors) {
+ devt = MKDEV(disk->major, disk->first_minor + bdev_partno(bdev));
} else {
err = blk_alloc_ext_minor();
if (err < 0)
@@ -404,7 +404,7 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
rcu_read_lock();
xa_for_each_start(&disk->part_tbl, idx, part, 1) {
- if (part->bd_partno != skip_partno &&
+ if (bdev_partno(part) != skip_partno &&
start < part->bd_start_sect + bdev_nr_sectors(part) &&
start + length > part->bd_start_sect) {
overlap = true;
@@ -469,7 +469,7 @@ int bdev_del_partition(struct gendisk *disk, int partno)
* Just delete the partition and invalidate it.
*/
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
invalidate_bdev(part);
drop_partition(part);
ret = 0;
@@ -652,7 +652,7 @@ rescan:
* it cannot be looked up any more even when openers
* still hold references.
*/
- remove_inode_hash(part->bd_inode);
+ bdev_unhash(part);
/*
* If @disk->open_partitions isn't elevated but there's
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
- struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
+ struct address_space *mapping = state->disk->part0->bd_mapping;
struct folio *folio;
if (n >= get_capacity(state->disk)) {
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 38e58960ae03..2bd42fedb907 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -131,8 +131,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
return false;
}
- strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
- toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+ strscpy_pad(toc->bitmap1_name, data + 0x24, sizeof(toc->bitmap1_name));
toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
toc->bitmap1_size = get_unaligned_be64(data + 0x36);
@@ -142,8 +141,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
TOC_BITMAP1, toc->bitmap1_name);
return false;
}
- strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
- toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+ strscpy_pad(toc->bitmap2_name, data + 0x46, sizeof(toc->bitmap2_name));
toc->bitmap2_start = get_unaligned_be64(data + 0x50);
toc->bitmap2_size = get_unaligned_be64(data + 0x58);
if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
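strscpy_pad() collapses the two-step strncpy() plus manual termination into one call
that both NUL-terminates and zero-pads the tail of the destination (sketch of the
before/after semantics):

/* Before: strncpy() leaves dst unterminated when src fills it. */
strncpy(dst, src, sizeof(dst));
dst[sizeof(dst) - 1] = 0;

/* After: always terminated, remainder zeroed, and the return value
 * is -E2BIG if src had to be truncated. */
strscpy_pad(dst, src, sizeof(dst));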
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 14fe0fef811c..598fd3e7fcc8 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
&key_type_user, key_name, true);
if (IS_ERR(kref))
- ret = PTR_ERR(kref);
+ return PTR_ERR(kref);
key = key_ref_to_ptr(kref);
down_read(&key->sem);
diff --git a/block/t10-pi.c b/block/t10-pi.c
index d90892fd6f2a..f4cc91156da1 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -495,5 +495,5 @@ const struct blk_integrity_profile ext_pi_type3_crc64 = {
};
EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("T10 Protection Information module");
MODULE_LICENSE("GPL");