* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
	* sysdeps/unix/sysv/linux/lowlevellock.c: Likewise.
	* sysdeps/pthread/pthread_cond_signal.c: Don't use requeue.
This commit is contained in:
Ulrich Drepper 2003-09-22 05:45:50 +00:00
parent 3a226d3301
commit bc1989aad2
5 changed files with 29 additions and 59 deletions

View File

@@ -4,9 +4,12 @@
locking macros. No distinction between normal and mutex locking
anymore.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Rewrite mutex
locking. Merge bits from lowlevelmutex.S we still need.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
* sysdeps/unix/sysv/linux/lowlevellock.c: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/lowlevelmutex.S: Removed.
* sysdeps/unix/sysv/linux/x86_64/lowlevelmutex.S: Removed.
* Makefile (routines): Remove libc-lowlevelmutex.
@@ -37,6 +40,7 @@
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise.
Don't use requeue.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise.
* sysdeps/pthread/pthread_cond_signal.c: Don't use requeue.
2003-09-20 Ulrich Drepper <drepper@redhat.com>

View File

@@ -52,22 +52,7 @@ __pthread_cond_signal (cond)
#endif
/* Wake one. */
int r = lll_futex_requeue (futex, 0, 1, &cond->__data.__lock);
if (__builtin_expect (r == -EINVAL, 0))
{
/* The requeue functionality is not available. */
#ifndef __ASSUME_FUTEX_REQUEUE
lll_futex_wake (futex, 1);
#endif
}
else if (r != 0)
{
/* We always have to make the syscall if requeue actually
moved a thread. */
lll_mutex_unlock_force (cond->__data.__lock);
return 0;
}
lll_futex_wake (futex, 1);
}
/* We are done. */

View File

@@ -127,10 +127,8 @@ static inline void
__attribute__ ((always_inline))
__lll_mutex_lock (int *futex)
{
int val = atomic_exchange_and_add (futex, 1);
if (__builtin_expect (val != 0, 0))
__lll_lock_wait (futex, val);
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
__lll_lock_wait (futex);
}
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
@@ -139,13 +137,8 @@ static inline void
__attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
{
int val = atomic_exchange_and_add (futex, 2);
if (__builtin_expect (val != 0, 0))
/* Note, the val + 1 is kind of ugly here. __lll_lock_wait will add
1 again. But we added 2 to the futex value so this is the right
value which will be passed to the kernel. */
__lll_lock_wait (futex, val + 1);
if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
__lll_lock_wait (futex);
}
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
@@ -158,11 +151,10 @@ static inline int
__attribute__ ((always_inline))
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
{
int val = atomic_exchange_and_add (futex, 1);
int result = 0;
if (__builtin_expect (val != 0, 0))
result = __lll_timedlock_wait (futex, val, abstime);
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
result = __lll_timedlock_wait (futex, abstime);
return result;
}

View File

@@ -25,22 +25,20 @@
void
__lll_lock_wait (int *futex, int val)
__lll_lock_wait (int *futex)
{
/* In the loop we are going to add 2 instead of 1 which is what
the caller did. Account for that. */
--val;
do
{
lll_futex_wait (futex, val + 2);
val = atomic_exchange_and_add (futex, 2);
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
if (oldval != 0)
lll_futex_wait (futex, 2);
}
while (val != 0);
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
}
int
__lll_timedlock_wait (int *futex, int val, const struct timespec *abstime)
__lll_timedlock_wait (int *futex, const struct timespec *abstime)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -68,12 +66,12 @@ __lll_timedlock_wait (int *futex, int val, const struct timespec *abstime)
return ETIMEDOUT;
/* Wait. */
if (lll_futex_timed_wait (futex, val + 1, &rt) == -ETIMEDOUT)
return ETIMEDOUT;
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
if (oldval != 0)
lll_futex_wait (futex, 2);
}
while ((val = atomic_exchange_and_add (futex, 1)) != 0);
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
*futex = 2;
return 0;
}

View File

@@ -106,34 +106,25 @@ extern void __lll_lock_wait (int *futex, int val) attribute_hidden;
#define lll_mutex_lock(lock) \
(void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_and_add (__futex, 1); \
__asm __volatile (__lll_acq_instr ::: "memory"); \
if (__builtin_expect (__val != 0, 0)) \
__lll_lock_wait (__futex, __val); \
if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \
__lll_lock_wait (__futex); \
})
#define lll_mutex_cond_lock(lock) \
(void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_and_add (__futex, 2); \
__asm __volatile (__lll_acq_instr ::: "memory"); \
if (__builtin_expect (__val != 0, 0)) \
/* Note, the val + 1 is kind of ugly here. __lll_lock_wait will add \
1 again. But we added 2 to the futex value so this is the right \
value which will be passed to the kernel. */ \
__lll_lock_wait (__futex, __val + 1); \
if (atomic_compare_and_exchange_bool_acq (__futex, 2, 0) != 0) \
__lll_lock_wait (__futex); \
})
extern int __lll_timedlock_wait
(int *futex, int val, const struct timespec *) attribute_hidden;
#define lll_mutex_timedlock(lock, abstime) \
({ int *__futex = &(lock); \
int __val = atomic_exchange_and_add (__futex, 1); \
__asm __volatile (__lll_acq_instr ::: "memory"); \
if (__builtin_expect (__val != 0, 0)) \
__val = __lll_timedlock_wait (__futex, __val, (abstime)); \
__val; \
(void) ({ \
int *__futex = &(lock); \
if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \
__lll_timedlock_wait (__futex, abstime); \
})
#define lll_mutex_unlock(lock) \