Skip to content

Commit a311c48

Browse files
committed
Merge git://git.kvack.org/~bcrl/aio-next
Pull aio fix and cleanups from Ben LaHaise: "This consists of a couple of code cleanups plus a minor bug fix"

* git://git.kvack.org/~bcrl/aio-next:
  aio: cleanup: flatten kill_ioctx()
  aio: report error from io_destroy() when threads race in io_destroy()
  fs/aio.c: Remove ctx parameter in kiocb_cancel
2 parents 0506408 + fa88b6f commit a311c48

File tree

1 file changed

+36
-34
lines changed

1 file changed

+36
-34
lines changed

fs/aio.c

Lines changed: 36 additions & 34 deletions
Original file line number | Diff line number | Diff line change
@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
477477
}
478478
EXPORT_SYMBOL(kiocb_set_cancel_fn);
479479

480-
static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
480+
static int kiocb_cancel(struct kiocb *kiocb)
481481
{
482482
kiocb_cancel_fn *old, *cancel;
483483

@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
538538
struct kiocb, ki_list);
539539

540540
list_del_init(&req->ki_list);
541-
kiocb_cancel(ctx, req);
541+
kiocb_cancel(req);
542542
}
543543

544544
spin_unlock_irq(&ctx->ctx_lock);
@@ -727,42 +727,42 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
727727
* when the processes owning a context have all exited to encourage
728728
* the rapid destruction of the kioctx.
729729
*/
730-
static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
730+
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
731731
struct completion *requests_done)
732732
{
733-
if (!atomic_xchg(&ctx->dead, 1)) {
734-
struct kioctx_table *table;
733+
struct kioctx_table *table;
735734

736-
spin_lock(&mm->ioctx_lock);
737-
rcu_read_lock();
738-
table = rcu_dereference(mm->ioctx_table);
735+
if (atomic_xchg(&ctx->dead, 1))
736+
return -EINVAL;
739737

740-
WARN_ON(ctx != table->table[ctx->id]);
741-
table->table[ctx->id] = NULL;
742-
rcu_read_unlock();
743-
spin_unlock(&mm->ioctx_lock);
744738

745-
/* percpu_ref_kill() will do the necessary call_rcu() */
746-
wake_up_all(&ctx->wait);
739+
spin_lock(&mm->ioctx_lock);
740+
rcu_read_lock();
741+
table = rcu_dereference(mm->ioctx_table);
747742

748-
/*
749-
* It'd be more correct to do this in free_ioctx(), after all
750-
* the outstanding kiocbs have finished - but by then io_destroy
751-
* has already returned, so io_setup() could potentially return
752-
* -EAGAIN with no ioctxs actually in use (as far as userspace
753-
* could tell).
754-
*/
755-
aio_nr_sub(ctx->max_reqs);
743+
WARN_ON(ctx != table->table[ctx->id]);
744+
table->table[ctx->id] = NULL;
745+
rcu_read_unlock();
746+
spin_unlock(&mm->ioctx_lock);
756747

757-
if (ctx->mmap_size)
758-
vm_munmap(ctx->mmap_base, ctx->mmap_size);
748+
/* percpu_ref_kill() will do the necessary call_rcu() */
749+
wake_up_all(&ctx->wait);
759750

760-
ctx->requests_done = requests_done;
761-
percpu_ref_kill(&ctx->users);
762-
} else {
763-
if (requests_done)
764-
complete(requests_done);
765-
}
751+
/*
752+
* It'd be more correct to do this in free_ioctx(), after all
753+
* the outstanding kiocbs have finished - but by then io_destroy
754+
* has already returned, so io_setup() could potentially return
755+
* -EAGAIN with no ioctxs actually in use (as far as userspace
756+
* could tell).
757+
*/
758+
aio_nr_sub(ctx->max_reqs);
759+
760+
if (ctx->mmap_size)
761+
vm_munmap(ctx->mmap_base, ctx->mmap_size);
762+
763+
ctx->requests_done = requests_done;
764+
percpu_ref_kill(&ctx->users);
765+
return 0;
766766
}
767767

768768
/* wait_on_sync_kiocb:
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
12191219
if (likely(NULL != ioctx)) {
12201220
struct completion requests_done =
12211221
COMPLETION_INITIALIZER_ONSTACK(requests_done);
1222+
int ret;
12221223

12231224
/* Pass requests_done to kill_ioctx() where it can be set
12241225
* in a thread-safe way. If we try to set it here then we have
12251226
* a race condition if two io_destroy() called simultaneously.
12261227
*/
1227-
kill_ioctx(current->mm, ioctx, &requests_done);
1228+
ret = kill_ioctx(current->mm, ioctx, &requests_done);
12281229
percpu_ref_put(&ioctx->users);
12291230

12301231
/* Wait until all IO for the context are done. Otherwise kernel
12311232
* keep using user-space buffers even if user thinks the context
12321233
* is destroyed.
12331234
*/
1234-
wait_for_completion(&requests_done);
1235+
if (!ret)
1236+
wait_for_completion(&requests_done);
12351237

1236-
return 0;
1238+
return ret;
12371239
}
12381240
pr_debug("EINVAL: io_destroy: invalid context id\n");
12391241
return -EINVAL;
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
15951597

15961598
kiocb = lookup_kiocb(ctx, iocb, key);
15971599
if (kiocb)
1598-
ret = kiocb_cancel(ctx, kiocb);
1600+
ret = kiocb_cancel(kiocb);
15991601
else
16001602
ret = -EINVAL;
16011603

0 commit comments

Comments (0)