2000-06-25  Ulrich Drepper  <drepper@redhat.com>

	* Makefile (tests): Add ex10.  Add rules to build it.
	* Versions [GLIBC_2.2] (libpthread): Add pthread_mutex_timedlock,
	pthread_rwlock_timedrdlock, and pthread_rwlock_timedwrlock.
	* condvar.c (pthread_cond_wait): Allow mutex of kind
	PTHREAD_MUTEX_TIMED_NP.
	(pthread_cond_timedwait_relative): Likewise.
	* mutex.c (__pthread_mutex_init): Default is PTHREAD_MUTEX_TIMED_NP.
	(__pthread_mutex_trylock): Use __pthread_alt_trylock for
	PTHREAD_MUTEX_ERRORCHECK_NP.  Handle PTHREAD_MUTEX_TIMED_NP.
	(__pthread_mutex_lock): Use __pthread_alt_lock for
	PTHREAD_MUTEX_ERRORCHECK_NP.  Handle PTHREAD_MUTEX_TIMED_NP.
	(__pthread_mutex_timedlock): New function.
	(__pthread_mutex_unlock): Use __pthread_alt_unlock for
	PTHREAD_MUTEX_ERRORCHECK_NP.  Handle PTHREAD_MUTEX_TIMED_NP.
	(__pthread_mutexattr_init): Use PTHREAD_MUTEX_TIMED_NP.
	(__pthread_mutexattr_settype): Allow PTHREAD_MUTEX_TIMED_NP.
	* spinlock.c: Implement alternate fastlocks.
	* spinlock.h: Add prototypes.
	* Examples/ex10.c: New file.
	* sysdeps/pthread/pthread.h: Add prototypes for new functions.
	Patch by Kaz Kylheku <kaz@ashi.footprints.net>.

	* rwlock.c (__pthread_rwlock_rdlock): Optimize loop a bit.
	(__pthread_rwlock_timedrdlock): New function.
	(__pthread_rwlock_timedwrlock): New function.
	Use alternate fastlock functions everywhere.
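(For orientation only, not part of the commit: a minimal caller of the new
mutex interface might look like the sketch below.  The deadline is an
absolute CLOCK_REALTIME value, as the new Examples/ex10.c also demonstrates.)

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

/* Try to take M, giving up after roughly two seconds of wall-clock time.
   Returns 0 on success or ETIMEDOUT.  */
static int
lock_with_timeout (void)
{
  struct timespec deadline;
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 2;
  return pthread_mutex_timedlock (&m, &deadline);
}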
This commit is contained in:
Ulrich Drepper 2000-06-26 01:47:56 +00:00
parent 7475d01602
commit d82e4c7bb2
10 changed files with 622 additions and 37 deletions

Examples/ex10.c

@@ -0,0 +1,105 @@
/* Tests for pthread_mutex_timedlock function.
Copyright (C) 2000 Free Software Foundation, Inc.
Contributed by Kaz Kylheku <kaz@ashi.footprints.net>, 2000.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include <errno.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <time.h>
#define NUM_THREADS 10
#define NUM_ITERS 50
#define TIMEOUT_NS 100000000L
static void *thread (void *);
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
int
main (void)
{
pthread_t th;
int i;
for (i = 0; i < NUM_THREADS; i++)
{
if (pthread_create (&th, NULL, thread, NULL) != 0)
error (EXIT_FAILURE, 0, "cannot create thread");
}
(void) thread (NULL);
/* notreached */
return 0;
}
static void *
thread (void *arg)
{
int i;
pthread_t self = pthread_self ();
static int linecount; /* protected by flockfile(stdout) */
for (i = 0; i < NUM_ITERS; i++)
{
struct timespec ts;
for (;;)
{
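/* Compute an absolute deadline TIMEOUT_NS from now, keeping tv_nsec in
   [0, 1000000000): pthread_mutex_timedlock expects an absolute
   CLOCK_REALTIME time, not a relative interval.  */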
clock_gettime (CLOCK_REALTIME, &ts);
ts.tv_nsec += TIMEOUT_NS;
if (ts.tv_nsec >= 1000000000L) {
ts.tv_sec++;
ts.tv_nsec -= 1000000000L;
}
switch (pthread_mutex_timedlock (&mutex, &ts))
{
case 0:
flockfile (stdout);
printf ("%04d: thread %lu got mutex\n", ++linecount,
(unsigned long) self);
funlockfile (stdout);
break;
case ETIMEDOUT:
flockfile (stdout);
printf ("%04d: thread %lu timed out on mutex\n", ++linecount,
(unsigned long) self);
funlockfile (stdout);
continue;
}
break;
}
ts.tv_sec = 0;
ts.tv_nsec = TIMEOUT_NS;
nanosleep (&ts, NULL);
flockfile (stdout);
printf ("%04d: thread %lu releasing mutex\n", ++linecount,
(unsigned long) self);
funlockfile (stdout);
pthread_mutex_unlock (&mutex);
}
pthread_exit (NULL);
}

Makefile

@@ -38,7 +38,7 @@ libpthread-routines := attr cancel condvar join manager mutex ptfork \
oldsemaphore events getcpuclockid pspinlock barrier
vpath %.c Examples
tests = ex1 ex2 ex3 ex4 ex5 ex6 ex7 ex8 ex9 joinrace
tests = ex1 ex2 ex3 ex4 ex5 ex6 ex7 ex8 ex9 ex10 joinrace
include ../Rules
@@ -56,8 +56,10 @@ $(objpfx)libpthread.so: $(common-objpfx)libc.so
# Make sure we link with the thread library.
ifeq ($(build-shared),yes)
libpthread = $(objpfx)libpthread.so
librt = $(common-objpfx)rt/librt.so
else
libpthread = $(objpfx)libpthread.a
librt = $(common-objpfx)rt/librt.a
endif
$(objpfx)ex1: $(libpthread)
@@ -69,4 +71,5 @@ $(objpfx)ex6: $(libpthread)
$(objpfx)ex7: $(libpthread)
$(objpfx)ex8: $(libpthread)
$(objpfx)ex9: $(libpthread)
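# ex10 uses clock_gettime, which glibc provides in librt.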
$(objpfx)ex10: $(libpthread) $(librt)
$(objpfx)joinrace: $(libpthread)

Versions

@@ -138,6 +138,8 @@ libpthread {
pthread_barrier_destroy; pthread_barrier_init; pthread_barrier_wait;
pthread_barrierattr_destroy; pthread_barrierattr_init;
pthread_barrierattr_getpshared; pthread_barrierattr_setpshared;
pthread_mutex_timedlock;
pthread_rwlock_timedrdlock; pthread_rwlock_timedwrlock;
# Extensions.
pthread_yield;

condvar.c

@@ -62,7 +62,9 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
int already_canceled = 0;
/* Check whether the mutex is locked and owned by this thread. */
if (mutex->__m_kind != PTHREAD_MUTEX_FAST_NP && mutex->__m_owner != self)
if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
&& mutex->__m_kind != PTHREAD_MUTEX_FAST_NP
&& mutex->__m_owner != self)
return EINVAL;
/* Set up extrication interface */
@@ -121,7 +123,9 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
pthread_extricate_if extr;
/* Check whether the mutex is locked and owned by this thread. */
if (mutex->__m_kind != PTHREAD_MUTEX_FAST_NP && mutex->__m_owner != self)
if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
&& mutex->__m_kind != PTHREAD_MUTEX_FAST_NP
&& mutex->__m_owner != self)
return EINVAL;
/* Set up extrication interface */

mutex.c

@@ -29,7 +29,7 @@ int __pthread_mutex_init(pthread_mutex_t * mutex,
{
__pthread_init_lock(&mutex->__m_lock);
mutex->__m_kind =
mutex_attr == NULL ? PTHREAD_MUTEX_FAST_NP : mutex_attr->__mutexkind;
mutex_attr == NULL ? PTHREAD_MUTEX_TIMED_NP : mutex_attr->__mutexkind;
mutex->__m_count = 0;
mutex->__m_owner = NULL;
return 0;
@@ -65,11 +65,14 @@ int __pthread_mutex_trylock(pthread_mutex_t * mutex)
}
return retcode;
case PTHREAD_MUTEX_ERRORCHECK_NP:
retcode = __pthread_trylock(&mutex->__m_lock);
retcode = __pthread_alt_trylock(&mutex->__m_lock);
if (retcode == 0) {
mutex->__m_owner = thread_self();
}
return retcode;
case PTHREAD_MUTEX_TIMED_NP:
retcode = __pthread_alt_trylock(&mutex->__m_lock);
return retcode;
default:
return EINVAL;
}
@@ -97,15 +100,61 @@ int __pthread_mutex_lock(pthread_mutex_t * mutex)
case PTHREAD_MUTEX_ERRORCHECK_NP:
self = thread_self();
if (mutex->__m_owner == self) return EDEADLK;
__pthread_lock(&mutex->__m_lock, self);
__pthread_alt_lock(&mutex->__m_lock, self);
mutex->__m_owner = self;
return 0;
case PTHREAD_MUTEX_TIMED_NP:
__pthread_alt_lock(&mutex->__m_lock, NULL);
return 0;
default:
return EINVAL;
}
}
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
int __pthread_mutex_timedlock (pthread_mutex_t *mutex,
const struct timespec *abstime)
{
pthread_descr self;
int res;
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
switch(mutex->__m_kind) {
case PTHREAD_MUTEX_FAST_NP:
__pthread_lock(&mutex->__m_lock, NULL);
return 0;
case PTHREAD_MUTEX_RECURSIVE_NP:
self = thread_self();
if (mutex->__m_owner == self) {
mutex->__m_count++;
return 0;
}
__pthread_lock(&mutex->__m_lock, self);
mutex->__m_owner = self;
mutex->__m_count = 0;
return 0;
case PTHREAD_MUTEX_ERRORCHECK_NP:
self = thread_self();
if (mutex->__m_owner == self) return EDEADLK;
res = __pthread_alt_timedlock(&mutex->__m_lock, self, abstime);
if (res != 0)
{
mutex->__m_owner = self;
return 0;
}
return ETIMEDOUT;
case PTHREAD_MUTEX_TIMED_NP:
/* Like PTHREAD_MUTEX_ERRORCHECK_NP above, this kind uses the alternate
   fastlock and therefore supports the timeout.  */
return (__pthread_alt_timedlock(&mutex->__m_lock, NULL, abstime)
? 0 : ETIMEDOUT);
default:
return EINVAL;
}
}
strong_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
int __pthread_mutex_unlock(pthread_mutex_t * mutex)
{
switch (mutex->__m_kind) {
@@ -124,7 +173,10 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
return EPERM;
mutex->__m_owner = NULL;
__pthread_unlock(&mutex->__m_lock);
__pthread_alt_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_TIMED_NP:
__pthread_alt_unlock(&mutex->__m_lock);
return 0;
default:
return EINVAL;
@@ -134,7 +186,7 @@ strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
attr->__mutexkind = PTHREAD_MUTEX_FAST_NP;
attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
return 0;
}
strong_alias (__pthread_mutexattr_init, pthread_mutexattr_init)
@@ -149,7 +201,8 @@ int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
{
if (kind != PTHREAD_MUTEX_FAST_NP
&& kind != PTHREAD_MUTEX_RECURSIVE_NP
&& kind != PTHREAD_MUTEX_ERRORCHECK_NP)
&& kind != PTHREAD_MUTEX_ERRORCHECK_NP
&& kind != PTHREAD_MUTEX_TIMED_NP)
return EINVAL;
attr->__mutexkind = kind;
return 0;

rwlock.c

@@ -214,10 +214,10 @@ __pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
int readers;
_pthread_descr writer;
__pthread_lock (&rwlock->__rw_lock, NULL);
__pthread_alt_lock (&rwlock->__rw_lock, NULL);
readers = rwlock->__rw_readers;
writer = rwlock->__rw_writer;
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
if (readers > 0 || writer != NULL)
return EBUSY;
@@ -236,23 +236,23 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
have_lock_already = rwlock_have_already(&self, rwlock,
&existing, &out_of_mem);
for (;;)
{
if (self == NULL)
self = thread_self ();
__pthread_lock (&rwlock->__rw_lock, self);
for (;;)
{
__pthread_alt_lock (&rwlock->__rw_lock, self);
if (rwlock_can_rdlock(rwlock, have_lock_already))
break;
enqueue (&rwlock->__rw_read_waiting, self);
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
++rwlock->__rw_readers;
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
if (have_lock_already || out_of_mem)
{
@@ -266,6 +266,51 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
}
strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
int
__pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
const struct timespec *abstime)
{
pthread_descr self = NULL;
pthread_readlock_info *existing;
int out_of_mem, have_lock_already;
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
have_lock_already = rwlock_have_already(&self, rwlock,
&existing, &out_of_mem);
if (self == NULL)
self = thread_self ();
for (;;)
{
if (__pthread_alt_timedlock (&rwlock->__rw_lock, self, abstime) == 0)
return ETIMEDOUT;
if (rwlock_can_rdlock(rwlock, have_lock_already))
break;
enqueue (&rwlock->__rw_read_waiting, self);
__pthread_alt_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
++rwlock->__rw_readers;
__pthread_alt_unlock (&rwlock->__rw_lock);
if (have_lock_already || out_of_mem)
{
if (existing != NULL)
existing->pr_lock_count++;
else
self->p_untracked_readlock_count++;
}
return 0;
}
strong_alias (__pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock)
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
@@ -277,7 +322,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
have_lock_already = rwlock_have_already(&self, rwlock,
&existing, &out_of_mem);
__pthread_lock (&rwlock->__rw_lock, self);
__pthread_alt_lock (&rwlock->__rw_lock, self);
/* 0 is passed here instead of have_lock_already.
This is to meet Single Unix Spec requirements:
@@ -291,7 +336,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
retval = 0;
}
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
if (retval == 0)
{
@@ -316,35 +361,67 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
while(1)
{
__pthread_lock (&rwlock->__rw_lock, self);
__pthread_alt_lock (&rwlock->__rw_lock, self);
if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
{
rwlock->__rw_writer = self;
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
return 0;
}
/* Suspend ourselves, then try again */
enqueue (&rwlock->__rw_write_waiting, self);
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
}
strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
int
__pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
const struct timespec *abstime)
{
pthread_descr self;
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
self = thread_self ();
while(1)
{
if (__pthread_alt_timedlock (&rwlock->__rw_lock, self, abstime) == 0)
return ETIMEDOUT;
if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
{
rwlock->__rw_writer = self;
__pthread_alt_unlock (&rwlock->__rw_lock);
return 0;
}
/* Suspend ourselves, then try again */
enqueue (&rwlock->__rw_write_waiting, self);
__pthread_alt_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
}
strong_alias (__pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock)
int
__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
int result = EBUSY;
__pthread_lock (&rwlock->__rw_lock, NULL);
__pthread_alt_lock (&rwlock->__rw_lock, NULL);
if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
{
rwlock->__rw_writer = thread_self ();
result = 0;
}
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
return result;
}
@@ -357,13 +434,13 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
pthread_descr torestart;
pthread_descr th;
__pthread_lock (&rwlock->__rw_lock, NULL);
__pthread_alt_lock (&rwlock->__rw_lock, NULL);
if (rwlock->__rw_writer != NULL)
{
/* Unlocking a write lock. */
if (rwlock->__rw_writer != thread_self ())
{
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
return EPERM;
}
rwlock->__rw_writer = NULL;
@@ -375,14 +452,14 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Restart all waiting readers. */
torestart = rwlock->__rw_read_waiting;
rwlock->__rw_read_waiting = NULL;
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
while ((th = dequeue (&torestart)) != NULL)
restart (th);
}
else
{
/* Restart one waiting writer. */
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
restart (th);
}
}
@@ -391,7 +468,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Unlocking a read lock. */
if (rwlock->__rw_readers == 0)
{
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
return EPERM;
}
@@ -402,7 +479,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
else
th = NULL;
__pthread_unlock (&rwlock->__rw_lock);
__pthread_alt_unlock (&rwlock->__rw_lock);
if (th != NULL)
restart (th);

spinlock.c

@@ -17,6 +17,8 @@
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <stdlib.h>
#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
@@ -147,6 +149,262 @@ again:
return 0;
}
/*
* Alternate fastlocks do not queue threads directly. Instead, they queue
* these wait queue node structures. When a timed wait wakes up due to
* a timeout, it can leave its wait node in the queue (because there
is no safe way to remove it from the queue). Some other thread will
* deallocate the abandoned node.
*/
struct wait_node {
struct wait_node *next; /* Next node in null terminated linked list */
pthread_descr thr; /* The thread waiting with this node */
int abandoned; /* Atomic flag */
};
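/* Protocol summary: a locker pushes its wait node with compare-and-swap
   and suspends.  The abandoned flag is decided exactly once, by
   testandset(): if a timed waiter sets it first, the wait timed out and
   the node stays in the queue until a later unlock dequeues and frees it;
   if the unlocking thread sets it first, the lock is handed to that
   waiter and the unlocker dequeues the node itself.  */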
static long wait_node_free_list;
static int wait_node_free_list_spinlock;
/* Allocate a new node from the head of the free list using an atomic
operation, or else using malloc if that list is empty. A fundamental
assumption here is that we can safely access wait_node_free_list->next.
That's because we never free nodes once we allocate them, so a pointer to a
node remains valid indefinitely. */
static struct wait_node *wait_node_alloc(void)
{
long oldvalue, newvalue;
do {
oldvalue = wait_node_free_list;
if (oldvalue == 0)
return malloc(sizeof (struct wait_node));
newvalue = (long) ((struct wait_node *) oldvalue)->next;
WRITE_MEMORY_BARRIER();
} while (! compare_and_swap(&wait_node_free_list, oldvalue, newvalue,
&wait_node_free_list_spinlock));
return (struct wait_node *) oldvalue;
}
/* Return a node to the head of the free list using an atomic
operation. */
static void wait_node_free(struct wait_node *wn)
{
long oldvalue, newvalue;
do {
oldvalue = wait_node_free_list;
wn->next = (struct wait_node *) oldvalue;
newvalue = (long) wn;
WRITE_MEMORY_BARRIER();
} while (! compare_and_swap(&wait_node_free_list, oldvalue, newvalue,
&wait_node_free_list_spinlock));
}
/* Remove a wait node from the specified queue. It is assumed
that the removal takes place concurrently with only atomic insertions at the
head of the queue. */
static void wait_node_dequeue(struct wait_node **pp_head,
struct wait_node **pp_node,
struct wait_node *p_node,
int *spinlock)
{
long oldvalue, newvalue;
/* If the node is being deleted from the head of the
list, it must be deleted using atomic compare-and-swap.
Otherwise it can be deleted in the straightforward way. */
if (pp_node == pp_head) {
oldvalue = (long) p_node;
newvalue = (long) p_node->next;
if (compare_and_swap((long *) pp_node, oldvalue, newvalue, spinlock))
return;
/* Oops! Compare and swap failed, which means the node is
no longer first. We delete it using the ordinary method. But we don't
know the identity of the node which now holds the pointer to the node
being deleted, so we must search from the beginning. */
for (pp_node = pp_head; *pp_node != p_node; pp_node = &(*pp_node)->next)
; /* null body */
}
*pp_node = p_node->next;
return;
}
void __pthread_alt_lock(struct _pthread_fastlock * lock,
pthread_descr self)
{
struct wait_node wait_node;
long oldstatus, newstatus;
do {
oldstatus = lock->__status;
if (oldstatus == 0) {
newstatus = 1;
} else {
if (self == NULL)
wait_node.thr = self = thread_self();
newstatus = (long) &wait_node;
}
wait_node.abandoned = 0;
wait_node.next = (struct wait_node *) oldstatus;
/* Make sure the store in wait_node.next completes before performing
the compare-and-swap */
MEMORY_BARRIER();
} while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
&lock->__spinlock));
/* Suspend. Note that unlike in __pthread_lock, we don't worry
here about spurious wakeup. That's because this lock is not
used in situations where that can happen; the restart can
only come from the previous lock owner. */
if (oldstatus != 0)
suspend(self);
}
/* Timed-out lock operation; returns 0 to indicate timeout. */
int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
pthread_descr self, const struct timespec *abstime)
{
struct wait_node *p_wait_node = wait_node_alloc();
long oldstatus, newstatus;
/* Out of memory, just give up and do ordinary lock. */
if (p_wait_node == 0) {
__pthread_alt_lock(lock, self);
return 1;
}
do {
oldstatus = lock->__status;
if (oldstatus == 0) {
newstatus = 1;
} else {
if (self == NULL)
p_wait_node->thr = self = thread_self();
newstatus = (long) p_wait_node;
}
p_wait_node->abandoned = 0;
p_wait_node->next = (struct wait_node *) oldstatus;
/* Make sure the store in wait_node.next completes before performing
the compare-and-swap */
MEMORY_BARRIER();
} while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
&lock->__spinlock));
/* If we did not get the lock, do a timed suspend. If we wake up due
to a timeout, then there is a race; the old lock owner may try
to remove us from the queue. This race is resolved by us and the owner
doing an atomic testandset() to change the state of the wait node from 0
to 1. If we succeed, then it's a timeout and we abandon the node in the
queue. If we fail, it means the owner gave us the lock. */
if (oldstatus != 0) {
if (timedsuspend(self, abstime) == 0) {
if (!testandset(&p_wait_node->abandoned))
return 0; /* Timeout! */
/* Eat outstanding resume from owner, otherwise wait_node_free() below
will race with owner's wait_node_dequeue(). */
suspend(self);
}
}
wait_node_free(p_wait_node);
return 1; /* Got the lock! */
}
void __pthread_alt_unlock(struct _pthread_fastlock *lock)
{
long oldstatus;
struct wait_node *p_node, **pp_node, *p_max_prio, **pp_max_prio;
struct wait_node ** const pp_head = (struct wait_node **) &lock->__status;
int maxprio;
while (1) {
/* If no threads are waiting for this lock, try to just
atomically release it. */
oldstatus = lock->__status;
if (oldstatus == 0 || oldstatus == 1) {
if (compare_and_swap_with_release_semantics (&lock->__status,
oldstatus, 0, &lock->__spinlock))
return;
else
continue;
}
/* Process the entire queue of wait nodes. Remove all abandoned
wait nodes and put them into the global free queue, and
remember the one unabandoned node which refers to the thread
having the highest priority. */
pp_max_prio = pp_node = pp_head;
p_max_prio = p_node = *pp_head;
maxprio = INT_MIN;
while (p_node != (struct wait_node *) 1) {
int prio;
if (p_node->abandoned) {
/* Remove abandoned node. */
wait_node_dequeue(pp_head, pp_node, p_node, &lock->__spinlock);
wait_node_free(p_node);
READ_MEMORY_BARRIER();
p_node = *pp_node;
continue;
} else if ((prio = p_node->thr->p_priority) >= maxprio) {
/* Otherwise remember it if its thread has a higher or equal priority
compared to that of any node seen thus far. */
maxprio = prio;
pp_max_prio = pp_node;
p_max_prio = p_node;
}
pp_node = &p_node->next;
READ_MEMORY_BARRIER();
p_node = *pp_node;
}
READ_MEMORY_BARRIER();
/* If all threads abandoned, go back to top */
if (maxprio == INT_MIN)
continue;
ASSERT (p_max_prio != (struct wait_node *) 1);
/* Now we want to remove the max priority thread's wait node from
the list. Before we can do this, we must atomically try to change the
node's abandon state from zero to nonzero. If we succeed, that means we
have the node that we will wake up. If we failed, then it means the
thread timed out and abandoned the node in which case we repeat the
whole unlock operation. */
if (!testandset(&p_max_prio->abandoned)) {
wait_node_dequeue(pp_head, pp_max_prio, p_max_prio, &lock->__spinlock);
WRITE_MEMORY_BARRIER();
restart(p_max_prio->thr);
return;
}
}
}
/* Compare-and-swap emulation with a spinlock */

spinlock.h

@@ -106,7 +106,39 @@ static inline int __pthread_trylock (struct _pthread_fastlock * lock)
return 0;
}
/* Variation of internal lock used for pthread_mutex_t, supporting
timed-out waits. Warning: do not mix these operations with the above ones
over the same lock object! */
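/* (The two variants encode __status incompatibly: the regular fastlock
   queues thread descriptors directly, while the alternate one queues
   struct wait_node objects.)  */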
extern void __pthread_alt_lock(struct _pthread_fastlock * lock,
pthread_descr self);
extern int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
pthread_descr self, const struct timespec *abstime);
extern void __pthread_alt_unlock(struct _pthread_fastlock *lock);
static inline void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
lock->__spinlock = 0;
}
static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
{
long oldstatus;
do {
oldstatus = lock->__status;
if (oldstatus != 0) return EBUSY;
} while(! compare_and_swap(&lock->__status, 0, 1, &lock->__spinlock));
return 0;
}
/* Initializers for both lock variants */
#define LOCK_INITIALIZER {0, 0}
#define ALT_LOCK_INITIALIZER {0, 0}
/* Operations on pthread_atomic, which is defined in internals.h */

sysdeps/pthread/pthread.h

@@ -30,7 +30,7 @@ __BEGIN_DECLS
/* Initializers. */
#define PTHREAD_MUTEX_INITIALIZER \
{0, 0, 0, PTHREAD_MUTEX_FAST_NP, {0, 0}}
{0, 0, 0, PTHREAD_MUTEX_TIMED_NP, {0, 0}}
#ifdef __USE_GNU
# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
{0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, {0, 0}}
@@ -79,10 +79,11 @@ enum
{
PTHREAD_MUTEX_FAST_NP,
PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ERRORCHECK_NP
PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_TIMED_NP
#ifdef __USE_UNIX98
,
PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_FAST_NP,
PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL
@@ -304,6 +305,13 @@ extern int pthread_mutex_trylock (pthread_mutex_t *__mutex) __THROW;
/* Wait until lock for MUTEX becomes available and lock it. */
extern int pthread_mutex_lock (pthread_mutex_t *__mutex) __THROW;
#ifdef __USE_XOPEN2K
/* Wait until lock becomes available, or specified time passes. */
extern int pthread_mutex_timedlock (pthread_mutex_t *__mutex,
__const struct timespec *__abstime)
__THROW;
#endif
/* Unlock MUTEX. */
extern int pthread_mutex_unlock (pthread_mutex_t *__mutex) __THROW;
@@ -311,7 +319,7 @@ extern int pthread_mutex_unlock (pthread_mutex_t *__mutex) __THROW;
/* Functions for handling mutex attributes. */
/* Initialize mutex attribute object ATTR with default attributes
(kind is PTHREAD_MUTEX_FAST_NP). */
(kind is PTHREAD_MUTEX_TIMED_NP). */
extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr) __THROW;
/* Destroy mutex attribute object ATTR. */
@@ -385,12 +393,26 @@ extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock) __THROW;
/* Try to acquire read lock for RWLOCK. */
extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock) __THROW;
#ifdef __USE_XOPEN2K
/* Try to acquire read lock for RWLOCK or return after specified time. */
extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
__const struct timespec *__abstime)
__THROW;
#endif
/* Acquire write lock for RWLOCK. */
extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock) __THROW;
/* Try to acquire writelock for RWLOCK. */
/* Try to acquire write lock for RWLOCK. */
extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock) __THROW;
#ifdef __USE_XOPEN2K
/* Try to acquire write lock for RWLOCK or return after specified time. */
extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
__const struct timespec *__abstime)
__THROW;
#endif
/* Unlock RWLOCK. */
extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock) __THROW;
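(Again for orientation only, not part of the commit: the rwlock variants
follow the same convention.  A hypothetical writer with a one-second
deadline, assuming RW was initialized with pthread_rwlock_init:)

#include <errno.h>
#include <pthread.h>
#include <time.h>

static int
write_with_deadline (pthread_rwlock_t *rw)
{
  struct timespec ts;
  int err;

  clock_gettime (CLOCK_REALTIME, &ts);
  ts.tv_sec += 1;                       /* absolute deadline */
  err = pthread_rwlock_timedwrlock (rw, &ts);
  if (err == 0)
    pthread_rwlock_unlock (rw);         /* got the write lock */
  return err;                           /* 0, ETIMEDOUT, or EINVAL */
}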