author    Benjamin LaHaise <bcrl@kvack.org>    2014-07-14 12:49:26 -0400
committer Benjamin LaHaise <bcrl@kvack.org>    2014-07-14 13:05:26 -0400
commit    263782c1c95bbddbb022dc092fd89a36bb8d5577 (patch)
tree      93044b03084bf998eec530621486791e987717ed /fs/aio.c
parent    1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
download  linux-rt-263782c1c95bbddbb022dc092fd89a36bb8d5577.tar.gz
aio: protect reqs_available updates from changes in interrupt handlers
As of commit f8567a3845ac05bb28f3c1b478ef752762bd39ef it is now possible to have put_reqs_available() called from irq context. While put_reqs_available() is per cpu, it did not protect itself from interrupts on the same CPU. This led to aio_complete() corrupting the available io requests count when run under heavy O_DIRECT workloads, as reported by Robert Elliott. Fix this by disabling irqs around the per cpu batch updates of reqs_available.

Many thanks to Robert and folks for testing and tracking this down.

Reported-by: Robert Elliott <Elliott@hp.com>
Tested-by: Robert Elliott <Elliott@hp.com>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: stable@vger.kernel.org
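The bug is the classic lost-update race on a non-atomic read-modify-write: the irq-context increment can land between the load and the store of the task-context increment, and one of the two updates disappears. A rough userspace analogue follows (an illustrative sketch only, not kernel code: a SIGALRM handler stands in for the irq-context caller, and sigprocmask() plays the role of local_irq_save()/local_irq_restore()):

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

/* Stands in for the per-cpu reqs_available counter. */
static volatile unsigned long reqs_available;

/* Plays the role of put_reqs_available() called from irq context. */
static void fake_irq(int sig)
{
	(void)sig;
	reqs_available += 1;	/* same non-atomic read-modify-write */
}

int main(void)
{
	struct sigaction sa = { 0 };
	sa.sa_handler = fake_irq;
	sigaction(SIGALRM, &sa, NULL);

	/* Fire the "interrupt" every 100 microseconds. */
	struct itimerval it = { { 0, 100 }, { 0, 100 } };
	setitimer(ITIMER_REAL, &it, NULL);

	sigset_t block, old;
	sigemptyset(&block);
	sigaddset(&block, SIGALRM);

	for (long i = 0; i < 1000000; i++) {
		/* The local_irq_save() analogue: without this pair, the
		 * handler can run between the load and the store of the
		 * increment below and its update is silently lost. */
		sigprocmask(SIG_BLOCK, &block, &old);
		reqs_available += 1;
		sigprocmask(SIG_SETMASK, &old, NULL);
	}
	printf("reqs_available = %lu\n", reqs_available);
	return 0;
}

Note that preempt_disable(), which the old code already had, is not enough on its own: it keeps the task on its CPU but does nothing about interrupts arriving on that same CPU, which is exactly the window f8567a3845ac opened.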
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 955947ef3e02..1c9c5f0a9e2b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
 	struct kioctx_cpu *kcpu;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	kcpu->reqs_available += nr;
+
 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
 		kcpu->reqs_available -= ctx->req_batch;
 		atomic_add(ctx->req_batch, &ctx->reqs_available);
 	}
 
+	local_irq_restore(flags);
 	preempt_enable();
 }
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	if (!kcpu->reqs_available) {
 		int old, avail = atomic_read(&ctx->reqs_available);
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
 	ret = true;
 	kcpu->reqs_available--;
 out:
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
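For reference, put_reqs_available() as it reads with this patch applied (reconstructed from the first hunk above; kioctx, kioctx_cpu and the helpers are kernel-internal to fs/aio.c):

/* The per-cpu batch update now runs with irqs off, so an aio_complete()
 * arriving in irq context on the same CPU cannot interleave with the
 * read-modify-write of kcpu->reqs_available below. */
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	local_irq_save(flags);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
	preempt_enable();
}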