author      Jens Axboe <axboe@kernel.dk>    2018-01-09 14:23:42 -0700
committer   Jens Axboe <axboe@kernel.dk>    2018-01-10 11:47:47 -0700
commit      0a72e7f44964b9ada3e5c15820372e9cb119bf80 (patch)
tree        08885e630c816c2d22951f29e1783c685400fcb1 /block
parent      76a86f9d027b342b8759a4b2f9f7fe046e284220 (diff)
download    linux-rt-0a72e7f44964b9ada3e5c15820372e9cb119bf80.tar.gz
block: add accessors for setting/querying request deadline
We reduce the resolution of request expiry, but since we're already using jiffies for this (where the resolution depends on the kernel configuration) and the timeout resolution is coarse anyway, that should be fine.

Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
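As a rough illustration of the resolution trade-off (a standalone userspace sketch, not kernel code; the jiffies values are made up), clearing the bottom bit of a stored deadline rounds an odd value down by a single jiffy and leaves an even value untouched:

#include <stdio.h>

/*
 * Standalone illustration of the accessor behaviour described above: the
 * low bit of the stored deadline is reserved, so setting a deadline loses
 * at most one jiffy of resolution.  The values below are made up, not
 * taken from the kernel.
 */
static unsigned long set_deadline(unsigned long deadline)
{
	return deadline & ~0x1UL;	/* same masking as the new accessors */
}

int main(void)
{
	printf("%lu -> %lu\n", 1000001UL, set_deadline(1000001UL));	/* rounded down */
	printf("%lu -> %lu\n", 1000002UL, set_deadline(1000002UL));	/* unchanged */
	return 0;
}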
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c        2
-rw-r--r--   block/blk-timeout.c  14
-rw-r--r--   block/blk.h          15
3 files changed, 24 insertions, 7 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3239ca9e199f..7035c305be45 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -858,7 +858,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	while (true) {
 		start = read_seqcount_begin(&rq->gstate_seq);
 		gstate = READ_ONCE(rq->gstate);
-		deadline = rq->deadline;
+		deadline = blk_rq_deadline(rq);
 		if (!read_seqcount_retry(&rq->gstate_seq, start))
 			break;
 		cond_resched();
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index ebe99963386c..a05e3676d24a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -112,7 +112,9 @@ static void blk_rq_timed_out(struct request *req)
 static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
 				 unsigned int *next_set)
 {
-	if (time_after_eq(jiffies, rq->deadline)) {
+	const unsigned long deadline = blk_rq_deadline(rq);
+
+	if (time_after_eq(jiffies, deadline)) {
 		list_del_init(&rq->timeout_list);
 
 		/*
@@ -120,8 +122,8 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
 		 */
 		if (!blk_mark_rq_complete(rq))
 			blk_rq_timed_out(rq);
-	} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
-		*next_timeout = rq->deadline;
+	} else if (!*next_set || time_after(*next_timeout, deadline)) {
+		*next_timeout = deadline;
 		*next_set = 1;
 	}
 }
@@ -162,7 +164,7 @@ void blk_abort_request(struct request *req)
 		 * immediately and that scan sees the new timeout value.
 		 * No need for fancy synchronizations.
 		 */
-		req->deadline = jiffies;
+		blk_rq_set_deadline(req, jiffies);
 		mod_timer(&req->q->timeout, 0);
 	} else {
 		if (blk_mark_rq_complete(req))
@@ -213,7 +215,7 @@ void blk_add_timer(struct request *req)
 	if (!req->timeout)
 		req->timeout = q->rq_timeout;
 
-	req->deadline = jiffies + req->timeout;
+	blk_rq_set_deadline(req, jiffies + req->timeout);
 	req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
 
 	/*
@@ -228,7 +230,7 @@ void blk_add_timer(struct request *req)
 	 * than an existing one, modify the timer. Round up to next nearest
 	 * second.
 	 */
-	expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
+	expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
 
 	if (!timer_pending(&q->timeout) ||
 	    time_before(expiry, q->timeout.expires)) {
diff --git a/block/blk.h b/block/blk.h
index eb306c52121e..bcd9cf7db0d4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -237,6 +237,21 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 }
 
 /*
+ * Steal a bit from this field for legacy IO path atomic IO marking. Note that
+ * setting the deadline clears the bottom bit, potentially clearing the
+ * completed bit. The user has to be OK with this (current ones are fine).
+ */
+static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
+{
+	rq->__deadline = time & ~0x1UL;
+}
+
+static inline unsigned long blk_rq_deadline(struct request *rq)
+{
+	return rq->__deadline & ~0x1UL;
+}
+
+/*
  * Internal io_context interface
  */
 void get_io_context(struct io_context *ioc);
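To make the caveat in the new comment concrete (setting the deadline clears bit 0, so a flag stored there is wiped), here is a self-contained sketch of the same bit-stealing pattern; it uses a hypothetical mock_request struct rather than the kernel's struct request:

#include <stdio.h>

/* Hypothetical stand-in for struct request; only the stolen-bit field. */
struct mock_request {
	unsigned long __deadline;
};

static void mock_set_deadline(struct mock_request *rq, unsigned long time)
{
	rq->__deadline = time & ~0x1UL;		/* clears the stolen bit 0 */
}

static unsigned long mock_deadline(struct mock_request *rq)
{
	return rq->__deadline & ~0x1UL;		/* mask the flag back out */
}

int main(void)
{
	struct mock_request rq = { 0 };

	mock_set_deadline(&rq, 5000UL);
	rq.__deadline |= 0x1UL;			/* pretend the legacy path set its marker */
	printf("deadline=%lu flag=%lu\n", mock_deadline(&rq), rq.__deadline & 0x1UL);

	mock_set_deadline(&rq, 6000UL);		/* note: this also clears the marker */
	printf("deadline=%lu flag=%lu\n", mock_deadline(&rq), rq.__deadline & 0x1UL);
	return 0;
}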