Use uint64_t and (uint64_t) 1 for 64-bit int

This patch replaces unsigned long int and 1UL with uint64_t and
(uint64_t) 1 to support ILP32 targets like x32.
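
For illustration, a minimal stand-alone sketch (not part of the patch;
SEM_NWAITERS_SHIFT is redefined locally so it compiles on its own) of why
the width matters on ILP32 targets such as x32, where unsigned long int is
only 32 bits wide:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* The semaphore code keeps the waiter count in the upper half of a 64-bit
   word; this is the shift it uses.  */
#define SEM_NWAITERS_SHIFT 32

int
main (void)
{
  /* On ILP32 targets such as x32, unsigned long int is 32 bits wide, so
     1UL << SEM_NWAITERS_SHIFT shifts by the full width of the type: that
     is undefined behavior and cannot reach the waiter-count bits.  */
  printf ("unsigned long int is %zu bytes here\n",
          sizeof (unsigned long int));

  /* Casting to uint64_t first makes the shift 64 bits wide on every
     target, so the increment always lands in the waiter-count half.  */
  uint64_t one_waiter = (uint64_t) 1 << SEM_NWAITERS_SHIFT;
  printf ("one waiter = 0x%016" PRIx64 "\n", one_waiter);

  return 0;
}

The second line prints 0x0000000100000000 everywhere; the old 1UL form only
produced that value on LP64 targets, which is the assumption BZ #17870
removes.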

	[BZ #17870]
	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
	with uint64_t.
	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
	(uint64_t) 1.
	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
	Replace 1UL with (uint64_t) 1.
	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
	int with uint64_t.
Author: H.J. Lu
Date:   2015-01-23 14:48:40 -08:00
Commit: 22971c35e2 (parent 2ec2d7032f)
5 changed files with 20 additions and 8 deletions

ChangeLog

@@ -1,3 +1,15 @@
+2015-01-23  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #17870]
+	* nptl/sem_post.c (__new_sem_post): Replace unsigned long int
+	with uint64_t.
+	* nptl/sem_waitcommon.c (__sem_wait_cleanup): Replace 1UL with
+	(uint64_t) 1.
+	(__new_sem_wait_slow): Replace unsigned long int with uint64_t.
+	Replace 1UL with (uint64_t) 1.
+	* sysdeps/nptl/internaltypes.h (new_sem): Replace unsigned long
+	int with uint64_t.
+
 2015-01-23  Roland McGrath  <roland@hack.frob.com>
 
 	* inet/if_index.c (if_nameindex): Add missing libc_hidden_weak.

NEWS

@@ -18,7 +18,7 @@ Version 2.21
   17664, 17665, 17668, 17682, 17702, 17717, 17719, 17722, 17723, 17724,
   17725, 17732, 17733, 17744, 17745, 17746, 17747, 17748, 17775, 17777,
   17780, 17781, 17782, 17791, 17793, 17796, 17797, 17803, 17806, 17834,
-  17844, 17848
+  17844, 17848, 17870
 
 * A new semaphore algorithm has been implemented in generic C code for all
   machines.  Previous custom assembly implementations of semaphore were

nptl/sem_post.c

@@ -65,7 +65,7 @@ __new_sem_post (sem_t *sem)
      added tokens before (the release sequence includes atomic RMW operations
      by other threads).  */
   /* TODO Use atomic_fetch_add to make it scale better than a CAS loop?  */
-  unsigned long int d = atomic_load_relaxed (&isem->data);
+  uint64_t d = atomic_load_relaxed (&isem->data);
   do
     {
       if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)

nptl/sem_waitcommon.c

@@ -187,7 +187,7 @@ __sem_wait_cleanup (void *arg)
 
 #if __HAVE_64B_ATOMICS
   /* Stop being registered as a waiter.  See below for MO.  */
-  atomic_fetch_add_relaxed (&sem->data, -(1UL << SEM_NWAITERS_SHIFT));
+  atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
 #else
   __sem_wait_32_finish (sem);
 #endif
@@ -263,8 +263,8 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
 #if __HAVE_64B_ATOMICS
   /* Add a waiter.  Relaxed MO is sufficient because we can rely on the
      ordering provided by the RMW operations we use.  */
-  unsigned long d = atomic_fetch_add_relaxed (&sem->data,
-      1UL << SEM_NWAITERS_SHIFT);
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+      (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
   pthread_cleanup_push (__sem_wait_cleanup, sem);
@@ -304,7 +304,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
	      err = -1;
	      /* Stop being registered as a waiter.  */
	      atomic_fetch_add_relaxed (&sem->data,
-		  -(1UL << SEM_NWAITERS_SHIFT));
+		  -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
	      break;
	    }
	  /* Relaxed MO is sufficient; see below.  */
@@ -320,7 +320,7 @@ __new_sem_wait_slow (struct new_sem *sem, const struct timespec *abstime)
	     up-to-date value; the futex_wait or the CAS perform the real
	     work.  */
	  if (atomic_compare_exchange_weak_acquire (&sem->data,
-	      &d, d - 1 - (1UL << SEM_NWAITERS_SHIFT)))
+	      &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
	    {
	      err = 0;
	      break;

sysdeps/nptl/internaltypes.h

@@ -155,7 +155,7 @@ struct new_sem
 # endif
 # define SEM_NWAITERS_SHIFT 32
 # define SEM_VALUE_MASK (~(unsigned int)0)
-  unsigned long int data;
+  uint64_t data;
   int private;
   int pad;
 #else
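
As a reference for the layout above, a small stand-alone sketch (not glibc
code; the two macros are copied from the excerpt so it compiles on its own)
of how the packed 64-bit data word splits into the semaphore value and the
waiter count, and what adding (uint64_t) 1 << SEM_NWAITERS_SHIFT does:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout as struct new_sem on __HAVE_64B_ATOMICS targets: the value
   lives in the low 32 bits, the waiter count in the high 32 bits.  */
#define SEM_NWAITERS_SHIFT 32
#define SEM_VALUE_MASK (~(unsigned int)0)

int
main (void)
{
  /* A data word describing a semaphore with value 3 and 2 blocked waiters.  */
  uint64_t data = ((uint64_t) 2 << SEM_NWAITERS_SHIFT) | 3;

  unsigned int value = data & SEM_VALUE_MASK;
  uint64_t nwaiters = data >> SEM_NWAITERS_SHIFT;
  printf ("value = %u, nwaiters = %" PRIu64 "\n", value, nwaiters);

  /* Registering one more waiter only touches the high half, which is what
     __new_sem_wait_slow does with atomic_fetch_add_relaxed above.  */
  data += (uint64_t) 1 << SEM_NWAITERS_SHIFT;
  printf ("after one more waiter: value = %u, nwaiters = %" PRIu64 "\n",
          (unsigned int) (data & SEM_VALUE_MASK),
          data >> SEM_NWAITERS_SHIFT);

  return 0;
}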