Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig          |  2
-rw-r--r--	block/bio.c            |  3
-rw-r--r--	block/blk-merge.c      | 18
-rw-r--r--	block/blk-settings.c   |  3
-rw-r--r--	block/blk-sysfs.c      |  5
-rw-r--r--	block/blk-zoned.c      |  2
-rw-r--r--	block/kyber-iosched.c  | 28
7 files changed, 27 insertions, 34 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 8044452a4fd3..028bc085dac8 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -218,4 +218,4 @@ config BLK_MQ_RDMA
 config BLK_PM
 	def_bool BLOCK && PM
 
-source block/Kconfig.iosched
+source "block/Kconfig.iosched"
diff --git a/block/bio.c b/block/bio.c
index 9194d8ad3d5e..4db1008309ed 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1260,7 +1260,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		if (ret)
 			goto cleanup;
 	} else {
-		zero_fill_bio(bio);
+		if (bmd->is_our_pages)
+			zero_fill_bio(bio);
 		iov_iter_advance(iter, bio->bi_iter.bi_size);
 	}
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7f1c6cf0167..71e9ac03f621 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -195,7 +195,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			goto split;
 		}
 
-		if (bvprvp && blk_queue_cluster(q)) {
+		if (bvprvp) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
 			if (!biovec_phys_mergeable(q, bvprvp, &bv))
@@ -295,7 +295,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, prev = 0;
+	int prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -313,7 +313,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	}
 
 	fbio = bio;
-	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -325,7 +324,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			if (no_sg_merge)
 				goto new_segment;
 
-			if (prev && cluster) {
+			if (prev) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -395,9 +394,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 {
 	struct bio_vec end_bv = { NULL }, nxt_bv;
 
-	if (!blk_queue_cluster(q))
-		return 0;
-
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
 	    queue_max_segment_size(q))
 		return 0;
@@ -414,12 +410,12 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
-		     struct scatterlist **sg, int *nsegs, int *cluster)
+		     struct scatterlist **sg, int *nsegs)
 {
 
 	int nbytes = bvec->bv_len;
 
-	if (*sg && *cluster) {
+	if (*sg) {
 		if ((*sg)->length + nbytes > queue_max_segment_size(q))
 			goto new_segment;
 		if (!biovec_phys_mergeable(q, bvprv, bvec))
@@ -465,12 +461,12 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 {
 	struct bio_vec bvec, bvprv = { NULL };
 	struct bvec_iter iter;
-	int cluster = blk_queue_cluster(q), nsegs = 0;
+	int nsegs = 0;
 
 	for_each_bio(bio)
 		bio_for_each_segment(bvec, bio, iter)
 			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
-					     &nsegs, &cluster);
+					     &nsegs);
 
 	return nsegs;
 }
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3abe831e92c8..3e7038e475ee 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -56,7 +56,6 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->cluster = 1;
 	lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
@@ -547,8 +546,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
 
-	t->cluster &= b->cluster;
-
 	/* Physical block size a multiple of the logical block size? */
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
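The blk-merge.c and blk-settings.c hunks above drop the per-queue "cluster" flag: segment coalescing is now gated only by physical contiguity and queue_max_segment_size(). As a rough illustration of that decision, here is a minimal user-space sketch; struct seg, can_merge() and MAX_SEGMENT_SIZE are hypothetical stand-ins, and the real biovec_phys_mergeable() additionally honors the queue's boundary masks.

/*
 * Model of the merge rule: two segments coalesce when the combined
 * length stays within the per-segment limit and they are physically
 * contiguous.  No "cluster" knob gates this any more.
 */
#include <stdbool.h>
#include <stdio.h>

struct seg {
	unsigned long long phys;	/* physical start address */
	unsigned int len;		/* length in bytes */
};

/* stand-in for queue_max_segment_size(q) */
#define MAX_SEGMENT_SIZE (64u * 1024)

static bool can_merge(const struct seg *prev, const struct seg *next)
{
	if (prev->len + next->len > MAX_SEGMENT_SIZE)
		return false;			/* would exceed the limit */
	return prev->phys + prev->len == next->phys;	/* contiguous? */
}

int main(void)
{
	struct seg a = { 0x100000, 4096 };
	struct seg b = { 0x101000, 4096 };	/* starts where a ends */
	struct seg c = { 0x200000, 4096 };	/* gap, never merged */

	printf("a+b: %d\n", can_merge(&a, &b));	/* prints 1 */
	printf("a+c: %d\n", can_merge(&a, &c));	/* prints 0 */
	return 0;
}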
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0619c8922893..590d1ef2f961 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -132,10 +132,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (blk_queue_cluster(q))
-		return queue_var_show(queue_max_segment_size(q), (page));
-
-	return queue_var_show(PAGE_SIZE, (page));
+	return queue_var_show(queue_max_segment_size(q), (page));
 }
 
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index e9c332b1d9da..2d98803faec2 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
 	struct page *page;
 	int order;
 
-	for (order = get_order(size); order > 0; order--) {
+	for (order = get_order(size); order >= 0; order--) {
 		page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
 		if (page) {
 			*nr_zones = min_t(unsigned int, *nr_zones,
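The blk-zoned.c change fixes an off-by-one in the allocation fallback: with "order > 0" the loop gave up before ever attempting an order-0 (single page) allocation. A small user-space sketch of the corrected walk-down pattern follows; get_order_sz(), alloc_pages_fallback() and PAGE_SIZE here are illustrative stand-ins, not the kernel's.

/*
 * Start at the order that covers the whole request and walk down to
 * order 0.  The ">= 0" bound is the fix: "> 0" skipped single-page
 * allocations entirely.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096ul

/* smallest order such that (PAGE_SIZE << order) >= size */
static int get_order_sz(size_t size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

static void *alloc_pages_fallback(size_t size, int *got_order)
{
	int order;

	for (order = get_order_sz(size); order >= 0; order--) {
		void *p = calloc(1, PAGE_SIZE << order);

		if (p) {
			*got_order = order;
			return p;
		}
	}
	return NULL;	/* even a single page failed */
}

int main(void)
{
	int order;
	void *p = alloc_pages_fallback(3 * PAGE_SIZE, &order);

	if (p) {
		printf("got order-%d block (%lu bytes)\n",
		       order, PAGE_SIZE << order);
		free(p);
	}
	return 0;
}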
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index de78e8aa7b0a..ec6a04e01bc1 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -195,7 +195,7 @@ struct kyber_hctx_data {
 	unsigned int batching;
 	struct kyber_ctx_queue *kcqs;
 	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
-	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
 	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
@@ -501,10 +501,11 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		INIT_LIST_HEAD(&khd->rqs[i]);
-		init_waitqueue_func_entry(&khd->domain_wait[i],
+		khd->domain_wait[i].sbq = NULL;
+		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
 					  kyber_domain_wake);
-		khd->domain_wait[i].private = hctx;
-		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
+		khd->domain_wait[i].wait.private = hctx;
+		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
 		atomic_set(&khd->wait_index[i], 0);
 	}
 
@@ -698,12 +699,13 @@ static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
 				  flush_busy_kcq, &data);
 }
 
-static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
 			     void *key)
 {
-	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
+	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
+	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
 
-	list_del_init(&wait->entry);
+	sbitmap_del_wait_queue(wait);
 	blk_mq_run_hw_queue(hctx, true);
 	return 1;
 }
@@ -714,7 +716,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
 	unsigned int sched_domain = khd->cur_domain;
 	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
+	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
 	struct sbq_wait_state *ws;
 	int nr;
 
@@ -725,11 +727,11 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	 * run when one becomes available. Note that this is serialized on
 	 * khd->lock, but we still need to be careful about the waker.
 	 */
-	if (nr < 0 && list_empty_careful(&wait->entry)) {
+	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
 		ws = sbq_wait_ptr(domain_tokens,
 				  &khd->wait_index[sched_domain]);
 		khd->domain_ws[sched_domain] = ws;
-		add_wait_queue(&ws->wait, wait);
+		sbitmap_add_wait_queue(domain_tokens, ws, wait);
 
 		/*
 		 * Try again in case a token was freed before we got on the wait
@@ -745,10 +747,10 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	 * between the !list_empty_careful() check and us grabbing the lock, but
 	 * list_del_init() is okay with that.
 	 */
-	if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
 		ws = khd->domain_ws[sched_domain];
 		spin_lock_irq(&ws->wait.lock);
-		list_del_init(&wait->entry);
+		sbitmap_del_wait_queue(wait);
 		spin_unlock_irq(&ws->wait.lock);
 	}
 
@@ -951,7 +953,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
 {									\
 	struct blk_mq_hw_ctx *hctx = data;				\
 	struct kyber_hctx_data *khd = hctx->sched_data;			\
-	wait_queue_entry_t *wait = &khd->domain_wait[domain];		\
+	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
 									\
 	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
 	return 0;							\
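The kyber conversion wraps the plain wait_queue_entry_t in a struct sbq_wait so that sbitmap_add_wait_queue()/sbitmap_del_wait_queue() can keep an accurate waiter count on the sbitmap_queue, while the wake callback recovers the outer struct from the embedded entry with container_of(). The stand-alone model below sketches that embed-and-recover pattern under those assumptions; every name in it is illustrative, not the kernel's.

/*
 * Embed a generic wait entry in a wrapper so add/del helpers can
 * bookkeep a waiter count, and recover the wrapper from the inner
 * entry in the wake path, as kyber_domain_wake() does.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_entry {
	int queued;			/* stand-in for list linkage */
};

struct token_queue {
	int ws_active;			/* how many waiters are queued */
};

struct sbq_wait_model {
	struct token_queue *sbq;	/* set while queued, else NULL */
	struct wait_entry wait;		/* embedded generic entry */
};

static void model_add_wait(struct token_queue *q, struct sbq_wait_model *w)
{
	if (!w->sbq) {			/* count each waiter only once */
		w->sbq = q;
		q->ws_active++;
	}
	w->wait.queued = 1;
}

static void model_del_wait(struct sbq_wait_model *w)
{
	if (w->sbq) {
		w->sbq->ws_active--;
		w->sbq = NULL;
	}
	w->wait.queued = 0;
}

/* the wake callback sees only the inner entry */
static void wake(struct wait_entry *we)
{
	struct sbq_wait_model *w =
		container_of(we, struct sbq_wait_model, wait);

	model_del_wait(w);
}

int main(void)
{
	struct token_queue q = { 0 };
	struct sbq_wait_model w = { .sbq = NULL };

	model_add_wait(&q, &w);
	printf("active waiters: %d\n", q.ws_active);	/* 1 */
	wake(&w.wait);
	printf("active waiters: %d\n", q.ws_active);	/* 0 */
	return 0;
}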