powerpc: Fix write-after-destroy in lock elision [BZ #20822]

The update of *adapt_count after the release of the lock causes a race
condition when thread A unlocks, thread B continues and destroys the
mutex, and thread A writes to *adapt_count.
Tulio Magno Quites Machado Filho 2017-01-03 17:16:02 -02:00
parent daaff5cc79
commit e9a96ea1ac
4 changed files with 33 additions and 12 deletions
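
POSIX allows a thread to destroy a mutex as soon as no thread holds it or is
waiting on it, so the interleaving described above is legal application code.
A minimal sketch of that pattern (a hypothetical example using only standard
pthread calls; on powerpc the elision hint *adapt_count lives inside the
mutex object):

#include <pthread.h>
#include <stdlib.h>

/* Thread B: blocks until thread A unlocks, then, as the last user of
   the mutex, legally destroys and frees it.  */
static void *
thread_b (void *arg)
{
  pthread_mutex_t *m = arg;
  pthread_mutex_lock (m);
  pthread_mutex_unlock (m);
  pthread_mutex_destroy (m);
  free (m);                     /* the mutex memory is now gone */
  return NULL;
}

int
main (void)
{
  pthread_mutex_t *m = malloc (sizeof *m);
  pthread_mutex_init (m, NULL);
  pthread_mutex_lock (m);

  pthread_t b;
  pthread_create (&b, NULL, thread_b, m);

  /* Thread A: before this fix, the elided unlock wrote *adapt_count
     after releasing the futex inside this call, a window in which
     thread B may already have destroyed and freed the mutex.  */
  pthread_mutex_unlock (m);

  pthread_join (b, NULL);
  return 0;
}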

--- a/ChangeLog
+++ b/ChangeLog

@@ -1,3 +1,16 @@
+2017-01-03  Rajalakshmi Srinivasaraghavan  <raji@linux.vnet.ibm.com>
+	    Steven Munroe  <sjmunroe@us.ibm.com>
+	    Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>
+
+	[BZ #20822]
+	* sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+	(__lll_lock_elision): Access adapt_count via C11 atomics.
+	* sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+	(__lll_trylock_elision): Likewise.
+	* sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+	(__lll_unlock_elision): Update adapt_count variable inside the
+	critical section using C11 atomics.
+
 2017-01-03  Joseph Myers  <joseph@codesourcery.com>
 
 	* math/test-fenvinline.c (do_test): Disable tests of raised

--- a/sysdeps/unix/sysv/linux/powerpc/elision-lock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-lock.c

@@ -45,7 +45,9 @@
 int
 __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 {
-  if (*adapt_count > 0)
+  /* adapt_count is accessed concurrently but is just a hint.  Thus,
+     use atomic accesses but relaxed MO is sufficient.  */
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -67,7 +69,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
       if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
         {
           if (aconf.skip_lock_internal_abort > 0)
-            *adapt_count = aconf.skip_lock_internal_abort;
+            atomic_store_relaxed (adapt_count,
+                                  aconf.skip_lock_internal_abort);
           goto use_lock;
         }
     }
@@ -75,7 +78,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
   /* Fall back to locks for a bit if retries have been exhausted */
   if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
-    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+    atomic_store_relaxed (adapt_count,
+                          aconf.skip_lock_out_of_tbegin_retries);
 
 use_lock:
   return LLL_LOCK ((*lock), pshared);
 }
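
atomic_load_relaxed and atomic_store_relaxed are glibc-internal macros.  In
ISO C11 terms, the same hint-counter pattern looks roughly like the sketch
below (an illustration under the assumption that stale reads only cost
performance; this is not the glibc source):

#include <stdatomic.h>

/* adapt_count is only a heuristic: a stale or racy value causes at
   worst one suboptimal elide-vs-lock decision, never incorrect
   locking, so relaxed memory order suffices.  */
static _Atomic short adapt_count;

static int
elision_worthwhile (void)
{
  return atomic_load_explicit (&adapt_count, memory_order_relaxed) <= 0;
}

static void
skip_elision_for (short tries)
{
  atomic_store_explicit (&adapt_count, tries, memory_order_relaxed);
}

The point of replacing the plain accesses is not stronger ordering but
data-race freedom: a plain *adapt_count store concurrent with readers is
undefined behavior in the C11 memory model, while relaxed atomics make the
race well-defined and still compile to plain loads and stores on POWER.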

--- a/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-trylock.c

@@ -34,7 +34,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
     __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
-  if (*adapt_count > 0)
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -49,7 +49,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
             End it, and set the adapt count.  */
          __libc_tend (0);
          if (aconf.skip_lock_busy > 0)
-           *adapt_count = aconf.skip_lock_busy;
+           atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
        }
      else
        {
@@ -59,7 +59,8 @@ __lll_trylock_elision (int *futex, short *adapt_count)
             result in another failure.  Use normal locking now and
             for the next couple of calls.  */
          if (aconf.skip_trylock_internal_abort > 0)
-           *adapt_count = aconf.skip_trylock_internal_abort;
+           atomic_store_relaxed (adapt_count,
+                                 aconf.skip_trylock_internal_abort);
        }
    }
 
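
_TEXASRU_FAILURE_PERSISTENT inspects the POWER transaction-failure status
(TEXASRU) for aborts the hardware reports as likely to recur, as opposed to
transient conflicts worth retrying.  A bare-bones version of such a trylock
attempt using GCC's POWER HTM built-ins (compile with -mhtm; apart from the
built-ins and that macro, all names and the back-off value are hypothetical):

#include <htmintrin.h>
#include <stdatomic.h>

/* Returns 0 while executing transactionally (the lock looked free),
   or 1 if the caller should fall back to the real lock.  */
static int
try_elide (int *futex, _Atomic short *adapt_count, short skip_busy)
{
  if (__builtin_tbegin (0))
    {
      if (*futex == 0)
        return 0;                   /* stay inside the transaction */
      /* Lock is busy: end the transaction and back off for a bit.  */
      __builtin_tend (0);
      if (skip_busy > 0)
        atomic_store_explicit (adapt_count, skip_busy,
                               memory_order_relaxed);
    }
  else if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
    {
      /* This abort would happen again; skip elision for a few calls
         (back-off value chosen arbitrarily for the sketch).  */
      atomic_store_explicit (adapt_count, 3, memory_order_relaxed);
    }
  return 1;
}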

--- a/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c
+++ b/sysdeps/unix/sysv/linux/powerpc/elision-unlock.c

@@ -28,13 +28,16 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
     __libc_tend (0);
   else
     {
-      lll_unlock ((*lock), pshared);
-
-      /* Update the adapt count AFTER completing the critical section.
-         Doing this here prevents unneeded stalling when entering
-         a critical section.  Saving about 8% runtime on P8.  */
-      if (*adapt_count > 0)
-        (*adapt_count)--;
+      /* Update adapt_count in the critical section to prevent a
+         write-after-destroy error as mentioned in BZ 20822.  The
+         following update of adapt_count has to be contained within
+         the critical region of the fall-back lock in order to not violate
+         the mutex destruction requirements.  */
+      short __tmp = atomic_load_relaxed (adapt_count);
+      if (__tmp > 0)
+        atomic_store_relaxed (adapt_count, __tmp - 1);
+
+      lll_unlock ((*lock), pshared);
     }
   return 0;
 }
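
Restated with ISO C11 atomics (a sketch; futex_unlock is a hypothetical
stand-in for the glibc-internal lll_unlock), the fixed ordering is:

#include <stdatomic.h>

extern void futex_unlock (int *lock, int pshared);   /* hypothetical */

static void
unlock_fallback (int *lock, _Atomic short *adapt_count, int pshared)
{
  /* We still hold the lock here, so no other thread can legally have
     started destroying the mutex; adapt_count (a field inside the
     mutex) is safe to touch.  */
  short tmp = atomic_load_explicit (adapt_count, memory_order_relaxed);
  if (tmp > 0)
    atomic_store_explicit (adapt_count, tmp - 1, memory_order_relaxed);

  /* After this release the mutex may be destroyed and freed at any
     moment; neither *lock nor *adapt_count may be touched again.  */
  futex_unlock (lock, pshared);
}

The removed comment records the trade-off: updating the count after the
unlock avoided a stall when the next thread enters the critical section
(about 8% runtime on P8), a saving this fix deliberately gives back to
satisfy the POSIX mutex destruction requirements.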