ipc/sem.c: rewrite undo list locking

The attached patch:
- reverses the locking order of ulp->lock and sem_lock:
  Previously, ulp->lock was taken first, with sem_lock nested inside it.
  Now it's the other way around (a sketch of the new nesting follows below).
- converts the undo structure to RCU.
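
For orientation, a condensed kernel-context sketch of the new nesting (it
mirrors the find_alloc_undo() hunk below and relies on the ipc/sem.c
definitions; the helper name link_new_undo is made up for illustration):

	/* new nesting: sem_lock (sma->sem_perm.lock) is the outer lock,
	 * the per-process undo-list lock (ulp->lock) nests inside it.
	 */
	static void link_new_undo(struct sem_array *sma,
				  struct sem_undo_list *ulp,
				  struct sem_undo *new)
	{
		spin_lock(&sma->sem_perm.lock);	/* outer: semaphore array */
		spin_lock(&ulp->lock);		/* inner: per-process list */

		list_add_rcu(&new->list_proc, &ulp->list_proc);
		list_add(&new->list_id, &sma->list_id);

		spin_unlock(&ulp->lock);
		spin_unlock(&sma->sem_perm.lock);
	}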

Benefits:
- With the old locking order, IPC_RMID could not kfree the undo structures:
  the stale entries remained in the linked lists and were only released later.
- The patch fixes a race in semtimedop(): if both an IPC_RMID and a semget()
  that recreates exactly the same id happen between find_alloc_undo() and
  sem_lock, then semtimedop() would access already kfree'd memory (a timeline
  sketch follows below).
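
To make the race concrete, here is a rough timeline (our annotation; the
detection logic is condensed from the semtimedop() hunk below):

	/*
	 * task A: semtimedop()              task B
	 * --------------------              ------
	 * un = find_alloc_undo(ns, semid);
	 *   (now returns under rcu_read_lock())
	 *                                    semctl(semid, 0, IPC_RMID)
	 *                                      -> freeary(): un->semid = -1,
	 *                                         kfree() deferred via call_rcu()
	 *                                    semget() recreates the same id
	 * sma = sem_lock_check(ns, semid);
	 *   (succeeds - the id exists again)
	 *
	 * Before the patch, "un" could already be kfree'd here; with RCU it
	 * is still valid memory and un->semid == -1 marks it stale:
	 */
	error = -EIDRM;
	if (un && un->semid == -1) {
		rcu_read_unlock();
		goto out_unlock_free;	/* fail instead of using stale undo state */
	}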

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Reviewed-by: Nadia Derbey <Nadia.Derbey@bull.net>
Cc: Pierre Peiffer <peifferp@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Manfred Spraul, 2008-07-25 01:48:06 -07:00 (committed by Linus Torvalds)
parent a1193f8ec0
commit 380af1b33b
2 changed files with 99 additions and 56 deletions

include/linux/sem.h

@@ -78,6 +78,7 @@ struct seminfo {
 
 #ifdef __KERNEL__
 #include <asm/atomic.h>
+#include <linux/rcupdate.h>
 
 struct task_struct;
 
@@ -114,7 +115,10 @@ struct sem_queue {
  * when the process exits.
  */
 struct sem_undo {
-	struct list_head	list_proc;	/* per-process list: all undos from one process */
+	struct list_head	list_proc;	/* per-process list: all undos from one process. */
+						/* rcu protected */
+	struct rcu_head		rcu;		/* rcu struct for sem_undo() */
+	struct sem_undo_list	*ulp;		/* sem_undo_list for the process */
 	struct list_head	list_id;	/* per semaphore array list: all undos for one array */
 	int			semid;		/* semaphore set identifier */
 	short *			semadj;		/* array of adjustments, one per semaphore */

ipc/sem.c

@@ -504,27 +504,35 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
 	return semzcnt;
 }
 
+void free_un(struct rcu_head *head)
+{
+	struct sem_undo *un = container_of(head, struct sem_undo, rcu);
+	kfree(un);
+}
+
 /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
  * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
  * remains locked on exit.
  */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
-	struct sem_undo *un;
-	struct sem_queue *q, *t;
+	struct sem_undo *un, *tu;
+	struct sem_queue *q, *tq;
 	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 
-	/* Invalidate the existing undo structures for this semaphore set.
-	 * (They will be freed without any further action in exit_sem()
-	 * or during the next semop.)
-	 */
+	/* Free the existing undo structures for this semaphore set. */
 	assert_spin_locked(&sma->sem_perm.lock);
-	list_for_each_entry(un, &sma->list_id, list_id)
+	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
+		list_del(&un->list_id);
+		spin_lock(&un->ulp->lock);
 		un->semid = -1;
+		list_del_rcu(&un->list_proc);
+		spin_unlock(&un->ulp->lock);
+		call_rcu(&un->rcu, free_un);
+	}
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
-	list_for_each_entry_safe(q, t, &sma->sem_pending, list) {
+	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
 		list_del(&q->list);
 
 		q->status = IN_WAKEUP;
@@ -948,16 +956,11 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 
 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-	struct sem_undo *walk, *tmp;
+	struct sem_undo *walk;
 
-	assert_spin_locked(&ulp->lock);
-	list_for_each_entry_safe(walk, tmp, &ulp->list_proc, list_proc) {
+	list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
 		if (walk->semid == semid)
 			return walk;
-		if (walk->semid == -1) {
-			list_del(&walk->list_proc);
-			kfree(walk);
-		}
 	}
 	return NULL;
 }
@@ -970,6 +973,8 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
  * The function looks up (and if not present creates) the undo structure.
  * The size of the undo structure depends on the size of the semaphore
  * array, thus the alloc path is not that straightforward.
+ * Lifetime-rules: sem_undo is rcu-protected, on success, the function
+ * performs a rcu_read_lock().
  */
 static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 {
@@ -983,11 +988,13 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	if (error)
 		return ERR_PTR(error);
 
+	rcu_read_lock();
 	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	spin_unlock(&ulp->lock);
 	if (likely(un!=NULL))
 		goto out;
+	rcu_read_unlock();
 
 	/* no undo structure around - allocate one. */
 	/* step 1: figure out the size of the semaphore array */
@@ -1005,38 +1012,38 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* step 3: Acquire the lock on the undo list pointer */
-	spin_lock(&ulp->lock);
-
-	/* step 4: check for races: someone else allocated the undo struct,
-	 *         semaphore array was destroyed.
-	 */
-	un = lookup_undo(ulp, semid);
-	if (un) {
-		spin_unlock(&ulp->lock);
-		kfree(new);
-		sem_putref(sma);
-		goto out;
-	}
+	/* step 3: Acquire the lock on semaphore array */
 	sem_lock_and_putref(sma);
 	if (sma->sem_perm.deleted) {
 		sem_unlock(sma);
-		spin_unlock(&ulp->lock);
 		kfree(new);
 		un = ERR_PTR(-EIDRM);
 		goto out;
 	}
+	spin_lock(&ulp->lock);
+
+	/*
+	 * step 4: check for races: did someone else allocate the undo struct?
+	 */
+	un = lookup_undo(ulp, semid);
+	if (un) {
+		kfree(new);
+		goto success;
+	}
 
 	/* step 5: initialize & link new undo structure */
 	new->semadj = (short *) &new[1];
+	new->ulp = ulp;
 	new->semid = semid;
 	assert_spin_locked(&ulp->lock);
-	list_add(&new->list_proc, &ulp->list_proc);
+	list_add_rcu(&new->list_proc, &ulp->list_proc);
 	assert_spin_locked(&sma->sem_perm.lock);
 	list_add(&new->list_id, &sma->list_id);
-	sem_unlock(sma);
-	spin_unlock(&ulp->lock);
-
 	un = new;
+
+success:
+	spin_unlock(&ulp->lock);
+	rcu_read_lock();
+	sem_unlock(sma);
 
 out:
 	return un;
 }
@@ -1103,6 +1110,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
 
 	sma = sem_lock_check(ns, semid);
 	if (IS_ERR(sma)) {
+		if (un)
+			rcu_read_unlock();
 		error = PTR_ERR(sma);
 		goto out_free;
 	}
@@ -1111,10 +1120,26 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
 	 * semid identifiers are not unique - find_alloc_undo may have
 	 * allocated an undo structure, it was invalidated by an RMID
 	 * and now a new array with received the same id. Check and fail.
+	 * This case can be detected checking un->semid. The existance of
+	 * "un" itself is guaranteed by rcu.
 	 */
 	error = -EIDRM;
-	if (un && un->semid == -1)
-		goto out_unlock_free;
+	if (un) {
+		if (un->semid == -1) {
+			rcu_read_unlock();
+			goto out_unlock_free;
+		} else {
+			/*
+			 * rcu lock can be released, "un" cannot disappear:
+			 * - sem_lock is acquired, thus IPC_RMID is
+			 *   impossible.
+			 * - exit_sem is impossible, it always operates on
+			 *   current (or a dead task).
+			 */
+			rcu_read_unlock();
+		}
+	}
 
 	error = -EFBIG;
 	if (max >= sma->sem_nsems)
@@ -1242,7 +1267,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 void exit_sem(struct task_struct *tsk)
 {
 	struct sem_undo_list *ulp;
-	struct sem_undo *un, *tmp;
 
 	ulp = tsk->sysvsem.undo_list;
 	if (!ulp)
@@ -1252,28 +1276,47 @@ void exit_sem(struct task_struct *tsk)
 	if (!atomic_dec_and_test(&ulp->refcnt))
 		return;
 
-	spin_lock(&ulp->lock);
-
-	list_for_each_entry_safe(un, tmp, &ulp->list_proc, list_proc) {
+	for (;;) {
 		struct sem_array *sma;
+		struct sem_undo *un;
+		int semid;
 		int i;
 
-		if (un->semid == -1)
-			goto free;
+		rcu_read_lock();
+		un = list_entry(rcu_dereference(ulp->list_proc.next),
+					struct sem_undo, list_proc);
+		if (&un->list_proc == &ulp->list_proc)
+			semid = -1;
+		else
+			semid = un->semid;
+		rcu_read_unlock();
 
-		sma = sem_lock(tsk->nsproxy->ipc_ns, un->semid);
+		if (semid == -1)
+			break;
 
+		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
+
+		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma))
-			goto free;
+			continue;
 
-		if (un->semid == -1)
-			goto unlock_free;
+		un = lookup_undo(ulp, semid);
+		if (un == NULL) {
+			/* exit_sem raced with IPC_RMID+semget() that created
+			 * exactly the same semid. Nothing to do.
+			 */
+			sem_unlock(sma);
+			continue;
+		}
 
-		BUG_ON(sem_checkid(sma, un->semid));
-
-		/* remove un from sma->list_id */
+		/* remove un from the linked lists */
 		assert_spin_locked(&sma->sem_perm.lock);
 		list_del(&un->list_id);
 
+		spin_lock(&ulp->lock);
+		list_del_rcu(&un->list_proc);
+		spin_unlock(&ulp->lock);
+
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
 			struct sem * semaphore = &sma->sem_base[i];
@@ -1302,14 +1345,10 @@ void exit_sem(struct task_struct *tsk)
 		sma->sem_otime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
 		update_queue(sma);
-unlock_free:
 		sem_unlock(sma);
-free:
-		assert_spin_locked(&ulp->lock);
-		list_del(&un->list_proc);
-		kfree(un);
+
+		call_rcu(&un->rcu, free_un);
 	}
-	spin_unlock(&ulp->lock);
 	kfree(ulp);
 }