path: root/block/blk-mq.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 10:14:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 10:14:48 -0700
commit     34845d92bca527b5c2cf8b2293b71b9c746c79ca (patch)
tree       a1fd77581f6275843d0305a966d42d5b1b4748cf /block/blk-mq.c
parent     Merge tag 'io_uring-5.19-2022-06-02' of git://git.kernel.dk/linux-block (diff)
parent     block: Fix potential deadlock in blk_ia_range_sysfs_show() (diff)
Merge tag 'for-5.19/block-2022-06-02' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Just a collection of fixes that have been queued up since the initial
  merge window pull request, the majority of which are targeted for
  stable as well. One bio_set fix that fixes an issue with the dm
  adoption of cached bio structs that got introduced in this merge
  window"

* tag 'for-5.19/block-2022-06-02' of git://git.kernel.dk/linux-block:
  block: Fix potential deadlock in blk_ia_range_sysfs_show()
  block: fix bio_clone_blkg_association() to associate with proper blkcg_gq
  block: remove useless BUG_ON() in blk_mq_put_tag()
  blk-mq: do not update io_ticks with passthrough requests
  block: make bioset_exit() fully resilient against being called twice
  block: use bio_queue_enter instead of blk_queue_enter in bio_poll
  block: document BLK_STS_AGAIN usage
  block: take destination bvec offsets into account in bio_copy_data_iter
  blk-iolatency: Fix inflight count imbalances and IO hangs on offline
  blk-mq: don't touch ->tagset in blk_mq_get_sq_hctx
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae116b755648..30e4bdcd8d7f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -133,7 +133,8 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv,
 {
 	struct mq_inflight *mi = priv;
 
-	if ((!mi->part->bd_partno || rq->part == mi->part) &&
+	if (rq->part && blk_do_io_stat(rq) &&
+	    (!mi->part->bd_partno || rq->part == mi->part) &&
 	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 		mi->inflight[rq_data_dir(rq)]++;
 
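For reference, here is a sketch of how blk_mq_check_inflight() reads with the hunk above applied — the "blk-mq: do not update io_ticks with passthrough requests" fix from the list. Only the if-statement comes from the hunk; the signature and return value are assumed from the 5.19-era source, and the comment is editorial:

	static bool blk_mq_check_inflight(struct request *rq, void *priv,
					  bool reserved)
	{
		struct mq_inflight *mi = priv;

		/*
		 * blk_do_io_stat() returns false for passthrough requests,
		 * so they are now excluded from the inflight count, keeping
		 * it consistent with the io_ticks accounting that consumes
		 * it; requests with no partition attached are skipped too.
		 */
		if (rq->part && blk_do_io_stat(rq) &&
		    (!mi->part->bd_partno || rq->part == mi->part) &&
		    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
			mi->inflight[rq_data_dir(rq)]++;

		return true;
	}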
@@ -2174,8 +2175,7 @@ static bool blk_mq_has_sqsched(struct request_queue *q)
  */
 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 {
-	struct blk_mq_hw_ctx *hctx;
-
+	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 	/*
 	 * If the IO scheduler does not respect hardware queues when
 	 * dispatching, we just don't bother with multiple HW queues and
@@ -2183,8 +2183,8 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
 	 * just causes lock contention inside the scheduler and pointless cache
 	 * bouncing.
 	 */
-	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
-			raw_smp_processor_id());
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);
+
 	if (!blk_mq_hctx_stopped(hctx))
 		return hctx;
 	return NULL;
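Joining those two hunks, blk_mq_get_sq_hctx() ends up looking roughly like this — the "blk-mq: don't touch ->tagset in blk_mq_get_sq_hctx" fix from the list above. Mapping through the caller's per-CPU software context via blk_mq_get_ctx()/blk_mq_map_queue() avoids dereferencing q->tag_set the way blk_mq_map_queue_type() does, presumably because the tagset's lifetime is driver-controlled and it can already be freed while the queue is being torn down. One comment line falls between the two hunks and is elided here:

	static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
	{
		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
		/*
		 * If the IO scheduler does not respect hardware queues when
		 * dispatching, we just don't bother with multiple HW queues and
		 * ... (one comment line between the hunks, not shown) ...
		 * just causes lock contention inside the scheduler and pointless
		 * cache bouncing.
		 */
		struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);

		if (!blk_mq_hctx_stopped(hctx))
			return hctx;
		return NULL;
	}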