| author | Tejun Heo <tj@kernel.org> | 2012-03-05 13:15:26 -0800 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 21:27:24 +0100 |
| commit | f6e8d01bee036460e03bd4f6a79d014f98ba712e (patch) | |
| tree | acaaab3667e0450f0f05464426c3540c89ce4e18 /include/linux/iocontext.h | |
| parent | 3d48749d93a3dce732dd30a14002ab90ec4355f3 (diff) | |
| download | linux-next-f6e8d01bee036460e03bd4f6a79d014f98ba712e.tar.gz | |
block: add io_context->active_ref
Currently, ioc->nr_tasks is used to decide two things: whether an ioc
is done issuing IOs and whether it is shared by multiple tasks. This
patch separates the first out into ioc->active_ref, which is acquired
and released using {get|put}_io_context_active() respectively.

This will be used to associate bios with a given task. This patch
doesn't introduce any visible behavior change.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
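
For illustration, a minimal sketch of how the new pair of helpers might be used by a submission path (not part of this patch: the function name, GFP flags and NUMA node are assumptions, the path assumes @task is alive and therefore already holds its own active reference, and it assumes that put_io_context_active(), defined in the blk-ioc.c half of this series, also drops the reference taken by get_io_context_active()):

#include <linux/gfp.h>
#include <linux/iocontext.h>
#include <linux/sched.h>

/*
 * Illustration only, not part of this patch: a hypothetical path that
 * wants to issue new IOs against @task's ioc.  Assumes @task is alive
 * and therefore already holds its own active reference, and that
 * put_io_context_active() also drops the reference taken by
 * get_io_context_active().
 */
static void example_issue_on_task_ioc(struct task_struct *task)
{
	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
	if (!ioc)
		return;

	get_io_context_active(ioc);	/* ioc may now be used to issue new IOs */

	/* ... associate and submit bios against @ioc here ... */

	put_io_context_active(ioc);	/* done issuing IOs */
	put_io_context(ioc);		/* drop the ref from get_task_io_context() */
}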
Diffstat (limited to 'include/linux/iocontext.h')
-rw-r--r-- | include/linux/iocontext.h | 22 |
1 file changed, 20 insertions, 2 deletions
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 81a8870ac224..6f1a2608e91f 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -100,6 +100,7 @@ struct io_cq {
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,17 +121,34 @@ struct io_context {
 	struct work_struct release_work;
 };
 
-static inline void ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs. This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
 	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
-	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
 	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
+
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
 	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
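
After this change, ioc_task_link() is a thin wrapper around get_io_context_active() that additionally bumps nr_tasks, so linking a task to a shared ioc takes all three counters together. A rough sketch of the kind of task-sharing path it serves (the helper name is hypothetical and simplified; the in-tree user at the time was the CLONE_IO case of copy_io() in kernel/fork.c):

#include <linux/iocontext.h>
#include <linux/sched.h>

/*
 * Sketch only: sharing the current task's ioc with a newly created task,
 * modeled loosely on the CLONE_IO case of copy_io() in kernel/fork.c.
 * With this patch, ioc_task_link() pins refcount, active_ref and
 * nr_tasks together, so @child both keeps the ioc alive and is counted
 * as an active issuer.
 */
static void example_share_ioc(struct task_struct *child)
{
	struct io_context *ioc = current->io_context;

	if (ioc) {
		ioc_task_link(ioc);
		child->io_context = ioc;
	}
}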