tcg: Rename helper_atomic_*_mmu and provide for user-only

Always provide the atomic interface using TCGMemOpIdx oi
and uintptr_t retaddr.  Rename from helper_* to cpu_* so
as to (mostly) match the exec/cpu_ldst.h functions, and
to emphasize that they are not callable from TCG directly.

Tested-by: Cole Robinson <crobinso@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2021-07-16 14:20:49 -07:00
parent 9ef0c6d6a7
commit be9568b4e0
8 changed files with 104 additions and 118 deletions
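
For context, a minimal sketch (not part of the patch) of how a target helper would call the renamed interface; the helper name is hypothetical, but the oi/retaddr plumbing matches the call sites updated in the diffs below:

/* Hypothetical helper: atomically compare-and-swap a 32-bit little-endian
 * value at a guest address via the renamed cpu_atomic_*_mmu entry point,
 * which this patch makes available for both softmmu and user-only builds.
 */
uint32_t HELPER(example_cas)(CPUArchState *env, target_ulong addr,
                             uint32_t cmpv, uint32_t newv)
{
    uintptr_t ra = GETPC();
    int mem_idx = cpu_mmu_index(env, false);
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);

    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
}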


@@ -2686,12 +2686,14 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
cpu_stq_le_data_ra(env, ptr, val, 0);
}
/* First set of helpers allows passing in of OI and RETADDR. This makes
them callable from other helpers. */
/*
* First set of functions passes in OI and RETADDR.
* This makes them callable from other helpers.
*/
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP_RW \
atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)


@@ -1234,19 +1234,23 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
return ret;
}
/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#include "atomic_common.c.inc"
/*
* First set of functions passes in OI and RETADDR.
* This makes them callable from other helpers.
*/
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_DECLS
#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
#define ATOMIC_MMU_LOOKUP_R ATOMIC_MMU_LOOKUP_RW
#define ATOMIC_MMU_LOOKUP_W ATOMIC_MMU_LOOKUP_RW
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
#include "atomic_common.c.inc"
#define DATA_SIZE 1
#include "atomic_template.h"
@@ -1261,20 +1265,33 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif
/* The following is only callable from other helpers, and matches up
with the softmmu version. */
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP_RW
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
/*
* Second set of functions is directly callable from TCG.
*/
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_DECLS
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
#define ATOMIC_MMU_DECLS uintptr_t retaddr = GETPC()
#define DATA_SIZE 1
#include "atomic_template.h"
#define DATA_SIZE 2
#include "atomic_template.h"
#define DATA_SIZE 4
#include "atomic_template.h"
#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
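
As an editorial illustration (not part of the patch) of what the two blocks above expand to for DATA_SIZE 8 / big-endian, assuming SUFFIX is q and END is _be:

/* First block: internal entry point, takes oi and retaddr explicitly. */
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                    uint64_t cmpv, uint64_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

/* Second block: TCG-callable helper; retaddr comes from GETPC() internally. */
uint64_t helper_atomic_cmpxchgq_be(CPUArchState *env, target_ulong addr,
                                   uint64_t cmpv, uint64_t newv);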


@@ -1341,31 +1341,32 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
#endif
#endif /* CONFIG_SOFTMMU */
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
TCGMemOpIdx oi, uintptr_t retaddr);
@@ -1411,31 +1412,22 @@ GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */
/*
* These aren't really a "proper" helpers because TCG cannot manage Int128.
* However, use the same format as the others, for use by the backends.
*
* The cmpxchg functions are only defined if HAVE_CMPXCHG128;
* the ld/st functions are only defined if HAVE_ATOMIC128,
* as defined by <qemu/atomic128.h>.
*/
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
Int128 cmpv, Int128 newv,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
TCGMemOpIdx oi, uintptr_t retaddr);
void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
TCGMemOpIdx oi, uintptr_t retaddr);
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode);


@@ -564,7 +564,7 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
success = int128_eq(oldv, cmpv);
return !success;
@@ -638,7 +638,7 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
*/
cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
newv = int128_make128(new_hi, new_lo);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
success = int128_eq(oldv, cmpv);
return !success;
@@ -660,7 +660,7 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs] = int128_getlo(oldv);
env->xregs[rs + 1] = int128_gethi(oldv);
@@ -681,7 +681,7 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
env->xregs[rs + 1] = int128_getlo(oldv);
env->xregs[rs] = int128_gethi(oldv);


@@ -64,22 +64,12 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
#ifdef CONFIG_USER_ONLY
{
uint64_t *haddr = g2h(env_cpu(env), a0);
cmpv = cpu_to_le64(cmpv);
newv = cpu_to_le64(newv);
oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
oldv = le64_to_cpu(oldv);
}
#else
{
uintptr_t ra = GETPC();
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
}
#endif
if (oldv == cmpv) {
eflags |= CC_Z;
@@ -147,8 +137,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
int mem_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
newv, oi, ra);
Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
if (int128_eq(oldv, cmpv)) {
eflags |= CC_Z;


@@ -22,6 +22,7 @@
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "semihosting/semihost.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
@@ -782,9 +783,9 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
uint32_t u2 = env->dregs[Du2];
uint32_t l1, l2;
uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY)
#if defined(CONFIG_ATOMIC64)
int mmu_idx = cpu_mmu_index(env, 0);
TCGMemOpIdx oi;
TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
#endif
if (parallel) {
@@ -794,23 +795,13 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
if ((a1 & 7) == 0 && a2 == a1 + 4) {
c = deposit64(c2, 32, 32, c1);
u = deposit64(u2, 32, 32, u1);
#ifdef CONFIG_USER_ONLY
l = helper_atomic_cmpxchgq_be(env, a1, c, u);
#else
oi = make_memop_idx(MO_BEQ, mmu_idx);
l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
#endif
l = cpu_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
l1 = l >> 32;
l2 = l;
} else if ((a2 & 7) == 0 && a1 == a2 + 4) {
c = deposit64(c1, 32, 32, c2);
u = deposit64(u1, 32, 32, u2);
#ifdef CONFIG_USER_ONLY
l = helper_atomic_cmpxchgq_be(env, a2, c, u);
#else
oi = make_memop_idx(MO_BEQ, mmu_idx);
l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
#endif
l = cpu_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
l2 = l >> 32;
l1 = l;
} else


@@ -376,7 +376,7 @@ uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret);
return int128_getlo(ret);
}
@@ -388,7 +388,7 @@ uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
env->retxh = int128_gethi(ret);
return int128_getlo(ret);
}
@@ -401,7 +401,7 @@ void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
val = int128_make128(lo, hi);
helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}
void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
@@ -412,7 +412,7 @@ void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
/* We will have raised EXCP_ATOMIC from the translator. */
assert(HAVE_ATOMIC128);
val = int128_make128(lo, hi);
helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
@@ -429,8 +429,8 @@ uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
opidx, GETPC());
oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
opidx, GETPC());
success = int128_eq(oldv, cmpv);
}
env->reserve_addr = -1;
@@ -451,8 +451,8 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
cmpv = int128_make128(env->reserve_val2, env->reserve_val);
newv = int128_make128(new_lo, new_hi);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
opidx, GETPC());
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
opidx, GETPC());
success = int128_eq(oldv, cmpv);
}
env->reserve_addr = -1;


@@ -1811,7 +1811,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
fail = !int128_eq(oldv, cmpv);
env->cc_op = fail;
@@ -1884,7 +1884,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else
TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
} else {
ov = cpu_ldl_data_ra(env, a1, ra);
@@ -1903,13 +1903,8 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
uint64_t *haddr = g2h(env_cpu(env), a1);
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
# else
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
#else
/* Note that we asserted !parallel above. */
g_assert_not_reached();
@@ -1945,7 +1940,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
} else if (HAVE_CMPXCHG128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
cc = !int128_eq(ov, cv);
} else {
/* Note that we asserted !parallel above. */
@@ -1985,7 +1980,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
} else if (HAVE_ATOMIC128) {
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh);
helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
} else {
/* Note that we asserted !parallel above. */
g_assert_not_reached();
@@ -2486,7 +2481,7 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
hi = int128_gethi(v);
lo = int128_getlo(v);
@@ -2518,7 +2513,7 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
mem_idx = cpu_mmu_index(env, false);
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
v = int128_make128(low, high);
helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
/* Execute instruction. This instruction executes an insn modified with