include/qemu: Move CONFIG_ATOMIC128_OPT handling to atomic128.h
Not only the routines in ldst_atomicity.c.inc need markup, but also
the ones in the headers.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 333c813b06
parent 87a8d12180
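As background, here is a minimal standalone sketch of the pattern this commit propagates into the headers: when a translation unit is compiled without optimization, force at least -O1 on the function so GCC will still inline the 16-byte atomic, and assert the known 16-byte alignment of the pointer. The names ATTR_ATOMIC16_OPT and load16 are illustrative only and not part of QEMU.

#if !defined(__OPTIMIZE__)
# define ATTR_ATOMIC16_OPT __attribute__((optimize("O1")))
#else
# define ATTR_ATOMIC16_OPT
#endif

/* Not QEMU code: a sketch of the markup applied to the real header routines. */
static inline __int128_t ATTR_ATOMIC16_OPT
load16(__int128_t *ptr)
{
    /* Promise GCC the pointer is 16-byte aligned (see GCC bug 107389). */
    __int128_t *p = __builtin_assume_aligned(ptr, 16);
    return __atomic_load_n(p, __ATOMIC_RELAXED);
}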
@@ -16,23 +16,6 @@
 #endif
 #define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8)
 
-/*
- * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
- * that are supported by the host, e.g. s390x. We can force the pointer to
- * have our known alignment with __builtin_assume_aligned, however prior to
- * GCC 13 that was only reliable with optimization enabled. See
- *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
- */
-#if defined(CONFIG_ATOMIC128_OPT)
-# if !defined(__OPTIMIZE__)
-#  define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
-# endif
-# define CONFIG_ATOMIC128
-#endif
-#ifndef ATTRIBUTE_ATOMIC128_OPT
-# define ATTRIBUTE_ATOMIC128_OPT
-#endif
-
 #if defined(CONFIG_ATOMIC128)
 # define HAVE_al16_fast true
 #else
@@ -12,24 +12,28 @@
 #define HOST_ATOMIC128_CAS_H
 
 #if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
 {
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
     Int128Alias r, c, n;
 
     c.s = cmp;
     n.s = new;
-    r.i = qatomic_cmpxchg__nocheck((__int128_t *)ptr, c.i, n.i);
+    r.i = qatomic_cmpxchg__nocheck(ptr_align, c.i, n.i);
     return r.s;
 }
 # define HAVE_CMPXCHG128 1
 #elif defined(CONFIG_CMPXCHG128)
-static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
 {
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
     Int128Alias r, c, n;
 
     c.s = cmp;
     n.s = new;
-    r.i = __sync_val_compare_and_swap_16((__int128_t *)ptr, c.i, n.i);
+    r.i = __sync_val_compare_and_swap_16(ptr_align, c.i, n.i);
     return r.s;
 }
 # define HAVE_CMPXCHG128 1
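The Int128Alias union used in the hunk above is defined in include/qemu/int128.h; the sketch below shows only the general type-punning idea it relies on, with illustrative names rather than QEMU's real definitions (which differ in detail and are conditional on the host supporting __int128).

#include <stdint.h>

/* Illustrative only; not QEMU's Int128/Int128Alias. */
typedef struct { uint64_t lo; int64_t hi; } SketchInt128;

typedef union {
    SketchInt128 s;   /* abstract 128-bit type used by callers */
    __int128_t   i;   /* view that the compiler atomics accept */
} SketchInt128Alias;

static inline SketchInt128 sketch_xchg(SketchInt128 *ptr, SketchInt128 val)
{
    SketchInt128Alias r, v;

    v.s = val;
    r.i = __atomic_exchange_n((__int128_t *)ptr, v.i, __ATOMIC_SEQ_CST);
    return r.s;
}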
@@ -12,32 +12,38 @@
 #define HOST_ATOMIC128_LDST_H
 
 #if defined(CONFIG_ATOMIC128)
-static inline Int128 atomic16_read(Int128 *ptr)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_read(Int128 *ptr)
 {
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
     Int128Alias r;
 
-    r.i = qatomic_read__nocheck((__int128_t *)ptr);
+    r.i = qatomic_read__nocheck(ptr_align);
     return r.s;
 }
 
-static inline void atomic16_set(Int128 *ptr, Int128 val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+atomic16_set(Int128 *ptr, Int128 val)
 {
+    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
     Int128Alias v;
 
     v.s = val;
-    qatomic_set__nocheck((__int128_t *)ptr, v.i);
+    qatomic_set__nocheck(ptr_align, v.i);
 }
 
 # define HAVE_ATOMIC128 1
 #elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
-static inline Int128 atomic16_read(Int128 *ptr)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+atomic16_read(Int128 *ptr)
 {
     /* Maybe replace 0 with 0, returning the old value. */
     Int128 z = int128_make64(0);
     return atomic16_cmpxchg(ptr, z, z);
 }
 
-static inline void atomic16_set(Int128 *ptr, Int128 val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+atomic16_set(Int128 *ptr, Int128 val)
 {
     Int128 old = *ptr, cmp;
     do {
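The fallback atomic16_read in the hunk above performs the load as a compare-and-swap of 0 against 0: whether the comparison matches or not, the operation yields the old contents atomically. Because a compare-and-swap is a read-modify-write, the memory must be writable, which is presumably why this path is excluded for CONFIG_USER_ONLY, where guest pages may be mapped read-only. A hedged standalone sketch of the same trick using GCC builtins (not QEMU code, illustrative name):

/* Not QEMU code: emulate a 16-byte atomic load with compare-and-swap. */
static inline __int128_t load16_via_cas(__int128_t *ptr)
{
    __int128_t expected = 0;

    /*
     * Compare-and-swap 0 with 0.  On success *ptr was 0 and is rewritten
     * with 0; on failure 'expected' is updated with the current contents.
     * Either way 'expected' ends up holding an atomic snapshot of *ptr,
     * at the cost of requiring the page to be writable.
     */
    __atomic_compare_exchange_n(ptr, &expected, 0, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return expected;
}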
@@ -15,6 +15,23 @@
 
 #include "qemu/int128.h"
 
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x. We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled. See
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+#  define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
 /*
  * GCC is a house divided about supporting large atomic operations.
  *
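Finally, a hedged sketch of how code that includes qemu/atomic128.h can consult the HAVE_ATOMIC128 predicate defined alongside these routines; the helper read_guest_16 and its fallback are illustrative, not code from the QEMU tree:

#include "qemu/atomic128.h"

static Int128 read_guest_16(Int128 *host_addr)
{
    if (HAVE_ATOMIC128) {
        /* One 16-byte atomic load via the header routine. */
        return atomic16_read(host_addr);
    }
    /* Illustrative non-atomic fallback; real callers use their own slow paths. */
    return *host_addr;
}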