aio: fix error handling and rcu usage in "convert the ioctx list to table lookup v3"
In the patch "aio: convert the ioctx list to table lookup v3", incorrect handling in the ioctx_alloc() error path was introduced that led to an ioctx being added via ioctx_add_table() while freed when the ioctx_alloc() call returned -EAGAIN due to hitting the aio_max_nr limit. Fix this by only calling ioctx_add_table() as the last step in ioctx_alloc(). Also, several unnecessary rcu_dereference() calls were added that led to RCU warnings where the system was already protected by a spin lock for accessing mm->ioctx_table. Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
This commit is contained in:
parent: 6878ea72a5
commit: da90382c2e
fs/aio.c | 17 lines changed
@@ -475,7 +475,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 	struct aio_ring *ring;

 	spin_lock(&mm->ioctx_lock);
-	table = rcu_dereference(mm->ioctx_table);
+	table = mm->ioctx_table;

 	while (1) {
 		if (table)
|
@@ -503,7 +503,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 		table->nr = new_nr;

 		spin_lock(&mm->ioctx_lock);
-		old = rcu_dereference(mm->ioctx_table);
+		old = mm->ioctx_table;

 		if (!old) {
 			rcu_assign_pointer(mm->ioctx_table, table);
|
@@ -579,10 +579,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (ctx->req_batch < 1)
 		ctx->req_batch = 1;

-	err = ioctx_add_table(ctx, mm);
-	if (err)
-		goto out_cleanup_noerr;
-
 	/* limit the number of system wide aios */
 	spin_lock(&aio_nr_lock);
 	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
|
@@ -595,13 +591,18 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)

 	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */

+	err = ioctx_add_table(ctx, mm);
+	if (err)
+		goto out_cleanup_put;
+
 	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
 		 ctx, ctx->user_id, mm, ctx->nr_events);
 	return ctx;

+out_cleanup_put:
+	percpu_ref_put(&ctx->users);
 out_cleanup:
 	err = -EAGAIN;
-out_cleanup_noerr:
 	aio_free_ring(ctx);
 out_freepcpu:
 	free_percpu(ctx->cpu);
|
@@ -626,7 +627,7 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 	struct kioctx_table *table;

 	spin_lock(&mm->ioctx_lock);
-	table = rcu_dereference(mm->ioctx_table);
+	table = mm->ioctx_table;

 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;
Loading…
Reference in New Issue