author     Jianchao Wang <jianchao.w.wang@oracle.com>  2018-08-21 15:15:03 +0800
committer  Jens Axboe <axboe@kernel.dk>                2018-08-21 09:02:55 -0600
commit     d48ece209f82c9ce07be942441b53d3fa3664936 (patch)
tree       ddf957d9d4edf1915558bc528fee6696cc02f1ed /block/blk-mq-sched.h
parent     fcedba42d94ecdc14ca13d3797cba1ccbf743fa4 (diff)
download   linux-next-d48ece209f82c9ce07be942441b53d3fa3664936.tar.gz
blk-mq: init hctx sched after update ctx and hctx mapping
Currently, when updating nr_hw_queues, the IO scheduler's init_hctx is invoked before the mapping between ctx and hctx has been adapted by blk_mq_map_swqueue. An IO scheduler's init_hctx (e.g. kyber's) may depend on this mapping, compute a wrong result, and finally panic. A simple way to fix this is to switch the IO scheduler to 'none' before updating nr_hw_queues, and then switch it back after the update. blk_mq_sched_init_hctx/blk_mq_sched_exit_hctx are removed since nobody uses them any more.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
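For context, the core of the approach described above lives outside this header (the diffstat below is limited to block/blk-mq-sched.h). A rough sketch of the intended ordering in the nr_hw_queues update path is shown here; it is a simplified illustration only, not the verbatim patch, and the helper names blk_mq_elv_switch_none()/blk_mq_elv_switch_back() as well as the error handling are assumptions made for the sketch.

/*
 * Sketch (not the verbatim patch) of the ordering fix: detach the I/O
 * scheduler from every queue in the tag set before nr_hw_queues is changed
 * and the ctx <-> hctx mapping is rebuilt, then reattach it afterwards, so
 * that the scheduler's init_hctx() never runs against a stale mapping.
 * blk_mq_elv_switch_none()/blk_mq_elv_switch_back() are illustrative names.
 */
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
					 int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);	/* remembers which queues had a scheduler */

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	/* 1. Switch every queue's elevator to 'none', saving the old one. */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	/* 2. Only now change nr_hw_queues and remap ctx -> hctx. */
	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_map_swqueue(q);
	}

switch_back:
	/*
	 * 3. Reattach the saved elevators; their init_hctx() now sees the
	 *    updated mapping.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

Because the scheduler is torn down and re-attached through the normal elevator switch path, no caller needs to (re)initialize per-hctx scheduler state by hand any more, which is why the blk_mq_sched_init_hctx()/blk_mq_sched_exit_hctx() declarations are dropped in the diff below.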
Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--  block/blk-mq-sched.h  |  5 -----
1 file changed, 0 insertions(+), 5 deletions(-)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 0cb8f938dff9..4e028ee42430 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
-int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx);
-void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx);
-
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{