author		Ming Lei <ming.lei@redhat.com>		2021-12-03 21:15:33 +0800
committer	Jens Axboe <axboe@kernel.dk>		2021-12-03 14:51:29 -0700
commit		bcc330f42f442a98d61f153d16c0b6487461ee81 (patch)
tree		33da728b0839442c866d10bea20375615a142521 /block/blk-mq.c
parent		704b914f15fb7daaf517e3acc4bed472b50ca19e (diff)
download	linux-next-bcc330f42f442a98d61f153d16c0b6487461ee81.tar.gz
blk-mq: pass request queue to blk_mq_run_dispatch_ops
We have switched to allocating srcu into the request queue, so it is fine
to pass the request queue to blk_mq_run_dispatch_ops().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20211203131534.3668411-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
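With the SRCU instance now embedded in the request queue (see the parent
commit), the wrapper only needs the queue to choose between plain RCU and
SRCU protection around the dispatch operation. A simplified sketch of the
macro after this change follows; it is not the verbatim definition from
block/blk-mq.h, and blk_queue_has_srcu()/q->srcu are assumed to come from
the earlier patches in this series:

/*
 * Sketch only: run @dispatch_ops under the read-side protection that
 * matches the queue. Queues with BLK_MQ_F_BLOCKING drivers carry an
 * SRCU instance in the queue itself; everyone else uses plain RCU.
 */
#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
do {								\
	if (!blk_queue_has_srcu(q)) {				\
		/* non-blocking drivers: plain RCU read side */	\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	} else {						\
		int srcu_idx;					\
								\
		/* blocking drivers: SRCU stored in the queue */\
		might_sleep();					\
		srcu_idx = srcu_read_lock((q)->srcu);		\
		(dispatch_ops);					\
		srcu_read_unlock((q)->srcu, srcu_idx);		\
	}							\
} while (0)

Callers then simply hand in the queue they already have, e.g.
blk_mq_run_dispatch_ops(rq->q, ...) or blk_mq_run_dispatch_ops(hctx->queue,
...), as the hunks below show.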
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6a2c2704454e..24c65bb8719b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1925,7 +1925,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	blk_mq_run_dispatch_ops(hctx, blk_mq_sched_dispatch_requests(hctx));
+	blk_mq_run_dispatch_ops(hctx->queue,
+			blk_mq_sched_dispatch_requests(hctx));
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2047,7 +2048,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	blk_mq_run_dispatch_ops(hctx,
+	blk_mq_run_dispatch_ops(hctx->queue,
		need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx));
@@ -2466,7 +2467,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	blk_mq_run_dispatch_ops(hctx,
+	blk_mq_run_dispatch_ops(rq->q,
		ret = __blk_mq_try_issue_directly(hctx, rq, true, last));
 	return ret;
 }
@@ -2780,7 +2781,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	    (q->nr_hw_queues == 1 || !is_sync)))
 		blk_mq_sched_insert_request(rq, false, true, true);
 	else
-		blk_mq_run_dispatch_ops(rq->mq_hctx,
+		blk_mq_run_dispatch_ops(rq->q,
			blk_mq_try_issue_directly(rq->mq_hctx, rq));
 }