Merge remote-tracking branch 'rth/tcg-pull' into staging

# By Richard Henderson
# Via Richard Henderson
* rth/tcg-pull:
  exec: Add both big- and little-endian memory helpers
  tcg: Add qemu_ld_st_i32/64
  tcg: Add TCGMemOp
  configure: Remove CONFIG_QEMU_LDST_OPTIMIZATION
  tcg: Add tcg-be-ldst.h
  tcg: Add tcg-be-null.h
  exec: Delete is_tcg_gen_code and GETRA_EXT
  tcg-aarch64: Update to helper_ret_*_mmu routines
  tcg: Merge tcg_register_helper into tcg_context_init
  tcg: Add tcg-runtime.c helpers to all_helpers
  tcg: Put target helper data into an array.
  tcg: Remove stray semi-colons from target-*/helper.h
  tcg: Move helper registration into tcg_context_init
  target-m68k: Rename helpers.h to helper.h
  tcg: Use a GHashTable for tcg_find_helper
  tcg: Delete tcg_helper_get_name declaration
  tcg-hppa: Remove tcg backend

Message-id: 1381440525-6666-1-git-send-email-rth@twiddle.net
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
commit ab1eb72b1d
Author: Anthony Liguori <aliguori@amazon.com>
Date:   2013-10-11 09:36:52 -07:00

58 changed files with 992 additions and 2629 deletions

MAINTAINERS

@@ -793,11 +793,6 @@ M: Andrzej Zaborowski <balrogg@gmail.com>
S: Maintained
F: tcg/arm/
HPPA target
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: tcg/hppa/
i386 target
M: qemu-devel@nongnu.org
S: Maintained

configure

@@ -429,9 +429,6 @@ case "$cpu" in
aarch64)
cpu="aarch64"
;;
hppa|parisc|parisc64)
cpu="hppa"
;;
mips*)
cpu="mips"
;;
@@ -3794,14 +3791,6 @@ echo "libs_softmmu=$libs_softmmu" >> $config_host_mak
echo "ARCH=$ARCH" >> $config_host_mak
case "$cpu" in
aarch64 | arm | i386 | x86_64 | x32 | ppc*)
# The TCG interpreter currently does not support ld/st optimization.
if test "$tcg_interpreter" = "no" ; then
echo "CONFIG_QEMU_LDST_OPTIMIZATION=y" >> $config_host_mak
fi
;;
esac
if test "$debug_tcg" = "yes" ; then
echo "CONFIG_DEBUG_TCG=y" >> $config_host_mak
fi

include/exec/def-helper.h

@@ -240,8 +240,7 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
#elif GEN_HELPER == 2
/* Register helpers. */
#define DEF_HELPER_FLAGS_0(name, flags, ret) \
tcg_register_helper(HELPER(name), #name);
#define DEF_HELPER_FLAGS_0(name, flags, ret) { HELPER(name), #name },
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
DEF_HELPER_FLAGS_0(name, flags, ret)
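
With GEN_HELPER == 2, each DEF_HELPER_* line now expands to an
array-initializer entry instead of an immediate tcg_register_helper() call.
A minimal sketch of the table this enables, assuming the TCGHelperInfo
struct and all_helpers name from the commit titles above (the actual tcg.c
hunk is not shown on this page):

typedef struct TCGHelperInfo {
    void *func;        /* helper entry point */
    const char *name;  /* symbolic name, e.g. for op dumps */
} TCGHelperInfo;

static const TCGHelperInfo all_helpers[] = {
#define GEN_HELPER 2
#include "helper.h"    /* each DEF_HELPER_* expands to { HELPER(x), "x" }, */
};

Per the commit list, tcg_context_init() then walks such a table once and
inserts each entry into the GHashTable used by tcg_find_helper().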

include/exec/exec-all.h

@@ -320,36 +320,6 @@ extern uintptr_t tci_tb_ptr;
#define GETPC() (GETRA() - GETPC_ADJ)
/* The LDST optimizations splits code generation into fast and slow path.
In some implementations, we pass the "logical" return address manually;
in others, we must infer the logical return from the true return. */
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
# if defined(__aarch64__)
# define GETRA_LDST(RA) tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 4; /* skip one instruction */
b = *(int32_t *)ra; /* load the branch insn */
b = (b << 6) >> (6 - 2); /* extract the displacement */
ra += b; /* apply the displacement */
return ra;
}
# endif
#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
/* ??? Delete these once they are no longer used. */
bool is_tcg_gen_code(uintptr_t pc_ptr);
#ifdef GETRA_LDST
# define GETRA_EXT() tcg_getra_ext(GETRA())
static inline uintptr_t tcg_getra_ext(uintptr_t ra)
{
return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
}
#else
# define GETRA_EXT() GETRA()
#endif
#if !defined(CONFIG_USER_ONLY)
void phys_mem_set_alloc(void *(*alloc)(ram_addr_t));

include/exec/softmmu_template.h

@@ -70,6 +70,48 @@
#define ADDR_READ addr_read
#endif
#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif
#if DATA_SIZE == 1
# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name helper_le_st_name
#else
# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name helper_be_ld_name
# define helper_te_st_name helper_be_st_name
#else
# define helper_te_ld_name helper_le_ld_name
# define helper_te_st_name helper_le_st_name
#endif
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
target_ulong addr,
@@ -89,18 +131,16 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
return val;
}
/* handle all cases except unaligned access which span two pages */
#ifdef SOFTMMU_CODE_ACCESS
static
static __attribute__((unused))
#endif
WORD_TYPE
glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -124,7 +164,12 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
goto do_unaligned_access;
}
ioaddr = env->iotlb[mmu_idx][index];
return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
res = TGT_LE(res);
return res;
}
/* Handle slow unaligned access (it spans two pages or IO). */
@@ -132,7 +177,7 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
target_ulong addr1, addr2;
DATA_TYPE res1, res2, res;
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
#ifdef ALIGNED_ONLY
@@ -142,16 +187,12 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
/* Little-endian combine. */
res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
return res;
}
@@ -163,16 +204,98 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
/* Note that ldl_raw is defined with type "int". */
return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
#if DATA_SIZE == 1
res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
return res;
}
#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
DATA_TYPE res;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
hwaddr ioaddr;
if ((addr & (DATA_SIZE - 1)) != 0) {
goto do_unaligned_access;
}
ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
res = TGT_BE(res);
return res;
}
/* Handle slow unaligned access (it spans two pages or IO). */
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
target_ulong addr1, addr2;
DATA_TYPE res1, res2;
unsigned shift;
do_unaligned_access:
#ifdef ALIGNED_ONLY
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
/* Big-endian combine. */
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
return res;
}
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
return res;
}
#endif /* DATA_SIZE > 1 */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
GETRA_EXT());
return helper_te_ld_name (env, addr, mmu_idx, GETRA());
}
#ifndef SOFTMMU_CODE_ACCESS
@@ -180,14 +303,19 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
/* Provide signed versions of the load routines as well. We can of course
avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE
glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr)
{
return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr, mmu_idx, retaddr);
return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}
# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr)
{
return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
# endif
#endif
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
@@ -208,10 +336,8 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
io_mem_write(mr, physaddr, val, 1 << SHIFT);
}
void
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, DATA_TYPE val,
int mmu_idx, uintptr_t retaddr)
void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
int mmu_idx, uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -239,6 +365,10 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
goto do_unaligned_access;
}
ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
val = TGT_LE(val);
glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
return;
}
@@ -256,11 +386,8 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
#else
/* Little-endian extract. */
uint8_t val8 = val >> (i * 8);
#endif
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
@@ -277,15 +404,91 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
#if DATA_SIZE == 1
glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
int mmu_idx, uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
}
#endif
tlb_fill(env, addr, 1, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
/* Handle an IO access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
hwaddr ioaddr;
if ((addr & (DATA_SIZE - 1)) != 0) {
goto do_unaligned_access;
}
ioaddr = env->iotlb[mmu_idx][index];
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
val = TGT_BE(val);
glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
return;
}
/* Handle slow unaligned access (it spans two pages or IO). */
if (DATA_SIZE > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>= TARGET_PAGE_SIZE)) {
int i;
do_unaligned_access:
#ifdef ALIGNED_ONLY
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
/* XXX: not efficient, but simple */
/* Note: relies on the fact that tlb_fill() does not remove the
* previous page from the TLB cache. */
for (i = DATA_SIZE - 1; i >= 0; i--) {
/* Big-endian extract. */
uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
mmu_idx, retaddr + GETPC_ADJ);
}
return;
}
/* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
if ((addr & (DATA_SIZE - 1)) != 0) {
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
}
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
DATA_TYPE val, int mmu_idx)
{
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
GETRA_EXT());
helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
@@ -301,3 +504,16 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name
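
As a standalone illustration (not QEMU code) of the two combine strategies
used by the unaligned slow paths above: for a 4-byte load at offset 2 into
an aligned 8-byte buffer, the little-endian path shifts the first aligned
word right and the second left, and the big-endian path does the opposite.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Guest bytes 00 11 22 33 44 55 66 77; the load wants bytes 2..5. */
    uint32_t res1 = 0x33221100;      /* bytes 0-3 as a little-endian word */
    uint32_t res2 = 0x77665544;      /* bytes 4-7 as a little-endian word */
    unsigned shift = (2 & 3) * 8;    /* (addr & (DATA_SIZE - 1)) * 8 = 16 */

    /* Little-endian combine, as in helper_le_ld_name. */
    uint32_t le = (res1 >> shift) | (res2 << (32 - shift));
    printf("le: 0x%08x\n", le);      /* 0x55443322 = bytes 22 33 44 55 */

    /* Big-endian combine, as in helper_be_ld_name. */
    uint32_t be1 = 0x00112233, be2 = 0x44556677;  /* same bytes, read BE */
    uint32_t be = (be1 << shift) | (be2 >> (32 - shift));
    printf("be: 0x%08x\n", be);      /* 0x22334455 = bytes 22 33 44 55 */
    return 0;
}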

target-alpha/helper.h

@@ -114,7 +114,7 @@ DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_1(halt, void, i64);
DEF_HELPER_1(halt, void, i64)
DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64)
DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64)

target-alpha/translate.c

@@ -140,10 +140,6 @@ void alpha_translate_init(void)
offsetof(CPUAlphaState, usp), "usp");
#endif
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
done_init = 1;
}

target-arm/helper.h

@@ -247,10 +247,10 @@ DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32);
DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32);
DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32);
DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64);
DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)

target-arm/translate.c

@@ -115,9 +115,6 @@ void arm_translate_init(void)
#endif
a64_translate_init();
#define GEN_HELPER 2
#include "helper.h"
}
static inline TCGv_i32 load_cpu_offset(int offset)

target-cris/helper.h

@@ -4,14 +4,14 @@ DEF_HELPER_2(raise_exception, void, env, i32)
DEF_HELPER_2(tlb_flush_pid, void, env, i32)
DEF_HELPER_2(spc_write, void, env, i32)
DEF_HELPER_3(dump, void, i32, i32, i32)
DEF_HELPER_1(rfe, void, env);
DEF_HELPER_1(rfn, void, env);
DEF_HELPER_1(rfe, void, env)
DEF_HELPER_1(rfn, void, env)
DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
DEF_HELPER_FLAGS_1(lz, TCG_CALL_NO_SE, i32, i32);
DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32);
DEF_HELPER_FLAGS_1(lz, TCG_CALL_NO_SE, i32, i32)
DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_NO_SE, i32, env, i32, i32, i32)

target-cris/translate.c

@@ -3480,9 +3480,6 @@ void cris_initialize_tcg(void)
{
int i;
#define GEN_HELPER 2
#include "helper.h"
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
cc_x = tcg_global_mem_new(TCG_AREG0,
offsetof(CPUCRISState, cc_x), "cc_x");

target-i386/translate.c

@@ -8261,10 +8261,6 @@ void optimize_flags_init(void)
cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for

target-m68k/helper.c

@@ -21,7 +21,7 @@
#include "cpu.h"
#include "exec/gdbstub.h"
#include "helpers.h"
#include "helper.h"
#define SIGNBIT (1u << 31)

target-m68k/op_helper.c

@@ -17,7 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
#include "helpers.h"
#include "helper.h"
#if defined(CONFIG_USER_ONLY)

target-m68k/translate.c

@@ -23,9 +23,9 @@
#include "tcg-op.h"
#include "qemu/log.h"
#include "helpers.h"
#include "helper.h"
#define GEN_HELPER 1
#include "helpers.h"
#include "helper.h"
//#define DEBUG_DISPATCH 1
@@ -108,9 +108,6 @@ void m68k_tcg_init(void)
NULL_QREG = tcg_global_mem_new(TCG_AREG0, -4, "NULL");
store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
#define GEN_HELPER 2
#include "helpers.h"
}
static inline void qemu_assert(int cond, const char *msg)

target-microblaze/translate.c

@@ -2024,8 +2024,6 @@ void mb_tcg_init(void)
offsetof(CPUMBState, sregs[i]),
special_regnames[i]);
}
#define GEN_HELPER 2
#include "helper.h"
}
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)

target-mips/helper.h

@@ -148,7 +148,7 @@ DEF_HELPER_2(mtc0_taghi, void, env, tl)
DEF_HELPER_2(mtc0_datahi, void, env, tl)
/* MIPS MT functions */
DEF_HELPER_2(mftgpr, tl, env, i32);
DEF_HELPER_2(mftgpr, tl, env, i32)
DEF_HELPER_2(mftlo, tl, env, i32)
DEF_HELPER_2(mfthi, tl, env, i32)
DEF_HELPER_2(mftacx, tl, env, i32)
@@ -165,11 +165,11 @@ DEF_HELPER_1(evpe, tl, env)
#endif /* !CONFIG_USER_ONLY */
/* microMIPS functions */
DEF_HELPER_4(lwm, void, env, tl, tl, i32);
DEF_HELPER_4(swm, void, env, tl, tl, i32);
DEF_HELPER_4(lwm, void, env, tl, tl, i32)
DEF_HELPER_4(swm, void, env, tl, tl, i32)
#ifdef TARGET_MIPS64
DEF_HELPER_4(ldm, void, env, tl, tl, i32);
DEF_HELPER_4(sdm, void, env, tl, tl, i32);
DEF_HELPER_4(ldm, void, env, tl, tl, i32)
DEF_HELPER_4(sdm, void, env, tl, tl, i32)
#endif
DEF_HELPER_2(fork, void, tl, tl)
@@ -615,7 +615,7 @@ DEF_HELPER_FLAGS_4(dmsubu, 0, void, tl, tl, i32, env)
DEF_HELPER_FLAGS_1(bitrev, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_3(insv, 0, tl, env, tl, tl)
#if defined(TARGET_MIPS64)
DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl);
DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl)
#endif
/* DSP Compare-Pick Sub-class insns */

target-mips/translate.c

@@ -15886,10 +15886,6 @@ void mips_tcg_init(void)
offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31");
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
inited = 1;
}

target-openrisc/translate.c

@@ -110,8 +110,6 @@ void openrisc_translate_init(void)
offsetof(CPUOpenRISCState, gpr[i]),
regnames[i]);
}
#define GEN_HELPER 2
#include "helper.h"
}
/* Writeback SR_F translation-space to execution-space. */

target-ppc/helper.h

@@ -168,8 +168,8 @@ DEF_HELPER_3(vslo, void, avr, avr, avr)
DEF_HELPER_3(vsro, void, avr, avr, avr)
DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
DEF_HELPER_2(lvsl, void, avr, tl);
DEF_HELPER_2(lvsr, void, avr, tl);
DEF_HELPER_2(lvsl, void, avr, tl)
DEF_HELPER_2(lvsr, void, avr, tl)
DEF_HELPER_4(vaddsbs, void, env, avr, avr, avr)
DEF_HELPER_4(vaddshs, void, env, avr, avr, avr)
DEF_HELPER_4(vaddsws, void, env, avr, avr, avr)
@@ -220,7 +220,7 @@ DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr)
DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr)
DEF_HELPER_2(mtvscr, void, env, avr);
DEF_HELPER_2(mtvscr, void, env, avr)
DEF_HELPER_3(lvebx, void, env, avr, tl)
DEF_HELPER_3(lvehx, void, env, avr, tl)
DEF_HELPER_3(lvewx, void, env, avr, tl)
@@ -349,7 +349,7 @@ DEF_HELPER_2(load_slb_vsid, tl, env, tl)
DEF_HELPER_FLAGS_1(slbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
#endif
DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl);
DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl)
@@ -367,7 +367,7 @@ DEF_HELPER_3(divo, tl, env, tl, tl)
DEF_HELPER_3(divs, tl, env, tl, tl)
DEF_HELPER_3(divso, tl, env, tl, tl)
DEF_HELPER_2(load_dcr, tl, env, tl);
DEF_HELPER_2(load_dcr, tl, env, tl)
DEF_HELPER_3(store_dcr, void, env, tl, tl)
DEF_HELPER_2(load_dump_spr, void, env, i32)

target-ppc/translate.c

@@ -175,10 +175,6 @@ void ppc_translate_init(void)
cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUPPCState, access_type), "access_type");
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
done_init = 1;
}

target-s390x/translate.c

@@ -188,10 +188,6 @@ void s390x_translate_init(void)
offsetof(CPUS390XState, fregs[i].d),
cpu_reg_names[i + 16]);
}
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
static TCGv_i64 load_reg(int reg)

target-sh4/translate.c

@@ -143,10 +143,6 @@ void sh4_translate_init(void)
offsetof(CPUSH4State, fregs[i]),
fregnames[i]);
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
done_init = 1;
}

target-sparc/helper.h

@@ -103,7 +103,7 @@ DEF_HELPER_3(fmuls, f32, env, f32, f32)
DEF_HELPER_3(fdivs, f32, env, f32, f32)
DEF_HELPER_3(fsmuld, f64, env, f32, f32)
DEF_HELPER_3(fdmulq, void, env, f64, f64);
DEF_HELPER_3(fdmulq, void, env, f64, f64)
DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32)
DEF_HELPER_2(fitod, f64, env, s32)
@@ -156,22 +156,22 @@ DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
i32, i32, i32)
VIS_HELPER(padd);
VIS_HELPER(psub);
VIS_HELPER(padd)
VIS_HELPER(psub)
#define VIS_CMPHELPER(name) \
DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \
i64, i64, i64) \
DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \
i64, i64, i64)
VIS_CMPHELPER(cmpgt);
VIS_CMPHELPER(cmpeq);
VIS_CMPHELPER(cmple);
VIS_CMPHELPER(cmpne);
VIS_CMPHELPER(cmpgt)
VIS_CMPHELPER(cmpeq)
VIS_CMPHELPER(cmple)
VIS_CMPHELPER(cmpne)
#endif
#undef F_HELPER_0_1
#undef VIS_HELPER
#undef VIS_CMPHELPER
DEF_HELPER_1(compute_psr, void, env);
DEF_HELPER_1(compute_C_icc, i32, env);
DEF_HELPER_1(compute_psr, void, env)
DEF_HELPER_1(compute_C_icc, i32, env)
#include "exec/def-helper.h"

target-sparc/translate.c

@@ -5456,11 +5456,6 @@ void gen_intermediate_code_init(CPUSPARCState *env)
offsetof(CPUSPARCState, fpr[i]),
fregnames[i]);
}
/* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
}

target-unicore32/translate.c

@@ -74,9 +74,6 @@ void uc32_translate_init(void)
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUUniCore32State, regs[i]), regnames[i]);
}
#define GEN_HELPER 2
#include "helper.h"
}
static int num_temps;

target-xtensa/translate.c

@@ -238,8 +238,6 @@ void xtensa_translate_init(void)
uregnames[i].name);
}
}
#define GEN_HELPER 2
#include "helper.h"
}
static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)

tcg/README

@@ -412,30 +412,25 @@ current TB was linked to this TB. Otherwise execute the next
instructions. Only indices 0 and 1 are valid and tcg_gen_goto_tb may be issued
at most once with each slot index per TB.
* qemu_ld8u t0, t1, flags
qemu_ld8s t0, t1, flags
qemu_ld16u t0, t1, flags
qemu_ld16s t0, t1, flags
qemu_ld32 t0, t1, flags
qemu_ld32u t0, t1, flags
qemu_ld32s t0, t1, flags
qemu_ld64 t0, t1, flags
* qemu_ld_i32/i64 t0, t1, flags, memidx
* qemu_st_i32/i64 t0, t1, flags, memidx
Load data at the QEMU CPU address t1 into t0. t1 has the QEMU CPU address
type. 'flags' contains the QEMU memory index (selects user or kernel access)
for example.
Load data at the guest address t1 into t0, or store data in t0 at guest
address t1. The _i32/_i64 size applies to the size of the input/output
register t0 only. The address t1 is always sized according to the guest,
and the width of the memory operation is controlled by flags.
Note that "qemu_ld32" implies a 32-bit result, while "qemu_ld32u" and
"qemu_ld32s" imply a 64-bit result appropriately extended from 32 bits.
Both t0 and t1 may be split into little-endian ordered pairs of registers
if dealing with 64-bit quantities on a 32-bit host.
* qemu_st8 t0, t1, flags
qemu_st16 t0, t1, flags
qemu_st32 t0, t1, flags
qemu_st64 t0, t1, flags
The memidx selects the qemu tlb index to use (e.g. user or kernel access).
The flags are the TCGMemOp bits, selecting the sign, width, and endianness
of the memory access.
Store the data t0 at the QEMU CPU Address t1. t1 has the QEMU CPU
address type. 'flags' contains the QEMU memory index (selects user or
kernel access) for example.
For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
64-bit memory access specified in flags.
*********
Note 1: Some shortcuts are defined when the last operand is known to be
a constant (e.g. addi for add, movi for mov).
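
To make the new interface concrete, a hedged sketch of front-end usage (the
MO_* values are the TCGMemOp constants this series introduces, as used in
the tcg-op.h hunk below; the wrapper function itself is illustrative):

/* Illustrative only: a sign-extending byte load, a target-endian 32-bit
   load, and a target-endian 64-bit store, all through softmmu mem_index. */
static void gen_example(TCGv addr, TCGv_i32 v32, TCGv_i64 v64, int mem_index)
{
    tcg_gen_qemu_ld_i32(v32, addr, mem_index, MO_SB);
    tcg_gen_qemu_ld_i32(v32, addr, mem_index, MO_TEUL);
    tcg_gen_qemu_st_i64(v64, addr, mem_index, MO_TEQ);
}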

tcg/aarch64/tcg-target.c

@@ -10,6 +10,7 @@
* See the COPYING file in the top-level directory for details.
*/
#include "tcg-be-ldst.h"
#include "qemu/bitops.h"
#ifndef NDEBUG
@@ -778,22 +779,24 @@ static inline void tcg_out_nop(TCGContext *s)
}
#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
int mmu_idx) */
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
*/
static const void * const qemu_ld_helpers[4] = {
helper_ldb_mmu,
helper_ldw_mmu,
helper_ldl_mmu,
helper_ldq_mmu,
helper_ret_ldub_mmu,
helper_ret_lduw_mmu,
helper_ret_ldul_mmu,
helper_ret_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
uintxx_t val, int mmu_idx) */
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
* uintxx_t val, int mmu_idx, uintptr_t ra)
*/
static const void * const qemu_st_helpers[4] = {
helper_stb_mmu,
helper_stw_mmu,
helper_stl_mmu,
helper_stq_mmu,
helper_ret_stb_mmu,
helper_ret_stw_mmu,
helper_ret_stl_mmu,
helper_ret_stq_mmu,
};
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -802,6 +805,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0);
tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X3, (tcg_target_long)lb->raddr);
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
(tcg_target_long)qemu_ld_helpers[lb->opc & 3]);
tcg_out_callr(s, TCG_REG_TMP);
@@ -822,6 +826,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
tcg_out_movr(s, 1, TCG_REG_X2, lb->datalo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X4, (tcg_target_long)lb->raddr);
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
(tcg_target_long)qemu_st_helpers[lb->opc & 3]);
tcg_out_callr(s, TCG_REG_TMP);
@@ -830,33 +835,13 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_goto(s, (tcg_target_long)lb->raddr);
}
void tcg_out_tb_finalize(TCGContext *s)
{
int i;
for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
if (label->is_ld) {
tcg_out_qemu_ld_slow_path(s, label);
} else {
tcg_out_qemu_st_slow_path(s, label);
}
}
}
static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
TCGReg data_reg, TCGReg addr_reg,
int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
{
int idx;
TCGLabelQemuLdst *label;
TCGLabelQemuLdst *label = new_ldst_label(s);
if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
tcg_abort();
}
idx = s->nb_qemu_ldst_labels++;
label = &s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;

tcg/aarch64/tcg-target.h

@@ -96,6 +96,8 @@ enum {
TCG_AREG0 = TCG_REG_X19,
};
#define TCG_TARGET_HAS_new_ldst 0
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
__builtin___clear_cache((char *)start, (char *)stop);

tcg/arm/tcg-target.c

@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-ldst.h"
/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
#ifndef __ARM_ARCH
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
@@ -1243,15 +1245,8 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
int addrhi_reg, int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
{
int idx;
TCGLabelQemuLdst *label;
TCGLabelQemuLdst *label = new_ldst_label(s);
if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
tcg_abort();
}
idx = s->nb_qemu_ldst_labels++;
label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -1968,22 +1963,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
#ifdef CONFIG_SOFTMMU
/* Generate TB finalization at the end of block. */
void tcg_out_tb_finalize(TCGContext *s)
{
int i;
for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
if (label->is_ld) {
tcg_out_qemu_ld_slow_path(s, label);
} else {
tcg_out_qemu_st_slow_path(s, label);
}
}
}
#endif /* SOFTMMU */
static const TCGTargetOpDef arm_op_defs[] = {
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },

tcg/arm/tcg-target.h

@@ -85,6 +85,8 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
#define TCG_TARGET_HAS_new_ldst 0
extern bool tcg_target_deposit_valid(int ofs, int len);
#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid

tcg/hppa/tcg-target.c: file diff suppressed because it is too large

tcg/hppa/tcg-target.h (deleted)

@@ -1,123 +0,0 @@
/*
* Tiny Code Generator for QEMU
*
* Copyright (c) 2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef TCG_TARGET_HPPA
#define TCG_TARGET_HPPA 1
#define TCG_TARGET_WORDS_BIGENDIAN
#define TCG_TARGET_NB_REGS 32
typedef enum {
TCG_REG_R0 = 0,
TCG_REG_R1,
TCG_REG_RP,
TCG_REG_R3,
TCG_REG_R4,
TCG_REG_R5,
TCG_REG_R6,
TCG_REG_R7,
TCG_REG_R8,
TCG_REG_R9,
TCG_REG_R10,
TCG_REG_R11,
TCG_REG_R12,
TCG_REG_R13,
TCG_REG_R14,
TCG_REG_R15,
TCG_REG_R16,
TCG_REG_R17,
TCG_REG_R18,
TCG_REG_R19,
TCG_REG_R20,
TCG_REG_R21,
TCG_REG_R22,
TCG_REG_R23,
TCG_REG_R24,
TCG_REG_R25,
TCG_REG_R26,
TCG_REG_DP,
TCG_REG_RET0,
TCG_REG_RET1,
TCG_REG_SP,
TCG_REG_R31,
} TCGReg;
#define TCG_CT_CONST_0 0x0100
#define TCG_CT_CONST_S5 0x0200
#define TCG_CT_CONST_S11 0x0400
#define TCG_CT_CONST_MS11 0x0800
#define TCG_CT_CONST_AND 0x1000
#define TCG_CT_CONST_OR 0x2000
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN 64
#define TCG_TARGET_CALL_STACK_OFFSET -48
#define TCG_TARGET_STATIC_CALL_ARGS_SIZE 8*4
#define TCG_TARGET_CALL_ALIGN_ARGS 1
#define TCG_TARGET_STACK_GROWSUP
/* optional instructions */
#define TCG_TARGET_HAS_div_i32 0
#define TCG_TARGET_HAS_rem_i32 0
#define TCG_TARGET_HAS_rot_i32 1
#define TCG_TARGET_HAS_ext8s_i32 1
#define TCG_TARGET_HAS_ext16s_i32 1
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_andc_i32 1
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, 0, rs */
#define TCG_TARGET_HAS_ext8u_i32 0 /* and rd, rs, 0xff */
#define TCG_TARGET_HAS_ext16u_i32 0 /* and rd, rs, 0xffff */
#define TCG_AREG0 TCG_REG_R17
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
start &= ~31;
while (start <= stop) {
asm volatile ("fdc 0(%0)\n\t"
"sync\n\t"
"fic 0(%%sr4, %0)\n\t"
"sync"
: : "r"(start) : "memory");
start += 32;
}
}
#endif

tcg/i386/tcg-target.c

@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-ldst.h"
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
@@ -1455,15 +1457,8 @@ static void add_qemu_ldst_label(TCGContext *s,
uint8_t *raddr,
uint8_t **label_ptr)
{
int idx;
TCGLabelQemuLdst *label;
TCGLabelQemuLdst *label = new_ldst_label(s);
if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
tcg_abort();
}
idx = s->nb_qemu_ldst_labels++;
label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -1628,25 +1623,6 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_push(s, retaddr);
tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[s_bits]);
}
/*
* Generate TB finalization at the end of block
*/
void tcg_out_tb_finalize(TCGContext *s)
{
int i;
TCGLabelQemuLdst *label;
/* qemu_ld/st slow paths */
for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[i];
if (label->is_ld) {
tcg_out_qemu_ld_slow_path(s, label);
} else {
tcg_out_qemu_st_slow_path(s, label);
}
}
}
#endif /* CONFIG_SOFTMMU */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,

tcg/i386/tcg-target.h

@@ -130,6 +130,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 0
#endif
#define TCG_TARGET_HAS_new_ldst 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) \
(((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
((ofs) == 0 && (len) == 16))

tcg/ia64/tcg-target.c

@@ -23,6 +23,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-null.h"
/*
* Register definitions
*/

tcg/ia64/tcg-target.h

@@ -151,6 +151,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_TARGET_HAS_new_ldst 0
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)

tcg/mips/tcg-target.c

@@ -24,6 +24,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-null.h"
#if defined(TCG_TARGET_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
# define TCG_NEED_BSWAP 0
#else

tcg/mips/tcg-target.h

@@ -122,6 +122,8 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_new_ldst 0
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */

tcg/ppc/tcg-target.c

@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-ldst.h"
static uint8_t *tb_ret_addr;
#if defined _CALL_DARWIN || defined __APPLE__
@@ -532,15 +534,8 @@ static void add_qemu_ldst_label (TCGContext *s,
uint8_t *raddr,
uint8_t *label_ptr)
{
int idx;
TCGLabelQemuLdst *label;
TCGLabelQemuLdst *label = new_ldst_label(s);
if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
tcg_abort();
}
idx = s->nb_qemu_ldst_labels++;
label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -889,23 +884,6 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_b(s, LK, (uintptr_t)st_trampolines[l->opc]);
tcg_out_b(s, 0, (uintptr_t)l->raddr);
}
void tcg_out_tb_finalize(TCGContext *s)
{
int i;
TCGLabelQemuLdst *label;
/* qemu_ld/st slow paths */
for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
label = (TCGLabelQemuLdst *) &s->qemu_ldst_labels[i];
if (label->is_ld) {
tcg_out_qemu_ld_slow_path (s, label);
}
else {
tcg_out_qemu_st_slow_path (s, label);
}
}
}
#endif
#ifdef CONFIG_SOFTMMU

tcg/ppc/tcg-target.h

@@ -99,6 +99,8 @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_new_ldst 0
#define TCG_AREG0 TCG_REG_R27
#define tcg_qemu_tb_exec(env, tb_ptr) \

tcg/ppc64/tcg-target.c

@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-ldst.h"
#define TCG_CT_CONST_S16 0x100
#define TCG_CT_CONST_U16 0x200
#define TCG_CT_CONST_S32 0x400
@@ -931,15 +933,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, int opc,
int data_reg, int addr_reg, int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
{
int idx;
TCGLabelQemuLdst *label;
TCGLabelQemuLdst *label = new_ldst_label(s);
if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
tcg_abort();
}
idx = s->nb_qemu_ldst_labels++;
label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
label->is_ld = is_ld;
label->opc = opc;
label->datalo_reg = data_reg;
@@ -998,21 +993,6 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
tcg_out_b(s, 0, (uintptr_t)lb->raddr);
}
void tcg_out_tb_finalize(TCGContext *s)
{
int i, n = s->nb_qemu_ldst_labels;
/* qemu_ld/st slow paths */
for (i = 0; i < n; i++) {
TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
if (label->is_ld) {
tcg_out_qemu_ld_slow_path(s, label);
} else {
tcg_out_qemu_st_slow_path(s, label);
}
}
}
#endif /* SOFTMMU */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)

tcg/ppc64/tcg-target.h

@@ -123,6 +123,8 @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
#define TCG_TARGET_HAS_new_ldst 0
#define TCG_AREG0 TCG_REG_R27
#define TCG_TARGET_EXTEND_ARGS 1

tcg/s390/tcg-target.c

@@ -24,6 +24,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-null.h"
/* We only support generating code for 64-bit mode. */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"

tcg/s390/tcg-target.h

@@ -99,6 +99,8 @@ typedef enum TCGReg {
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
#define TCG_TARGET_HAS_new_ldst 0
extern bool tcg_target_deposit_valid(int ofs, int len);
#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
#define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid

tcg/sparc/tcg-target.c

@@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-null.h"
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%g0",

tcg/sparc/tcg-target.h

@@ -148,6 +148,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 0
#endif
#define TCG_TARGET_HAS_new_ldst 0
#define TCG_AREG0 TCG_REG_I0
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)

tcg/tcg-be-ldst.h (new file)

@@ -0,0 +1,90 @@
/*
* TCG Backend Data: load-store optimization only.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifdef CONFIG_SOFTMMU
#define TCG_MAX_QEMU_LDST 640
typedef struct TCGLabelQemuLdst {
int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */
int opc:4;
TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
int mem_index; /* soft MMU memory index */
uint8_t *raddr; /* gen code addr of the next IR of qemu_ld/st IR */
uint8_t *label_ptr[2]; /* label pointers to be updated */
} TCGLabelQemuLdst;
typedef struct TCGBackendData {
int nb_ldst_labels;
TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST];
} TCGBackendData;
/*
* Initialize TB backend data at the beginning of the TB.
*/
static inline void tcg_out_tb_init(TCGContext *s)
{
s->be->nb_ldst_labels = 0;
}
/*
* Generate TB finalization at the end of block
*/
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
static void tcg_out_tb_finalize(TCGContext *s)
{
TCGLabelQemuLdst *lb = s->be->ldst_labels;
int i, n = s->be->nb_ldst_labels;
/* qemu_ld/st slow paths */
for (i = 0; i < n; i++) {
if (lb[i].is_ld) {
tcg_out_qemu_ld_slow_path(s, lb + i);
} else {
tcg_out_qemu_st_slow_path(s, lb + i);
}
}
}
/*
* Allocate a new TCGLabelQemuLdst entry.
*/
static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
{
TCGBackendData *be = s->be;
int n = be->nb_ldst_labels;
assert(n < TCG_MAX_QEMU_LDST);
be->nb_ldst_labels = n + 1;
return &be->ldst_labels[n];
}
#else
#include "tcg-be-null.h"
#endif /* CONFIG_SOFTMMU */
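
Backends that implement the load/store slow path include tcg-be-ldst.h at
the top of their tcg-target.c and call new_ldst_label() from their
add_qemu_ldst_label() helpers, as the aarch64, arm, i386, ppc and ppc64
hunks above show; every other backend includes tcg-be-null.h (next file),
presumably so common code can call tcg_out_tb_init() and
tcg_out_tb_finalize() unconditionally.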

tcg/tcg-be-null.h (new file)

@@ -0,0 +1,43 @@
/*
* TCG Backend Data: No backend data
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
typedef struct TCGBackendData {
/* Empty */
char dummy;
} TCGBackendData;
/*
* Initialize TB backend data at the beginning of the TB.
*/
static inline void tcg_out_tb_init(TCGContext *s)
{
}
/*
* Generate TB finalization at the end of block
*/
static inline void tcg_out_tb_finalize(TCGContext *s)
{
}

tcg/tcg-op.h

@@ -137,24 +137,6 @@ static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
*tcg_ctx.gen_opparam_ptr++ = offset;
}
static inline void tcg_gen_qemu_ldst_op_i64_i32(TCGOpcode opc, TCGv_i64 val,
TCGv_i32 addr, TCGArg mem_index)
{
*tcg_ctx.gen_opc_ptr++ = opc;
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(addr);
*tcg_ctx.gen_opparam_ptr++ = mem_index;
}
static inline void tcg_gen_qemu_ldst_op_i64_i64(TCGOpcode opc, TCGv_i64 val,
TCGv_i64 addr, TCGArg mem_index)
{
*tcg_ctx.gen_opc_ptr++ = opc;
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(addr);
*tcg_ctx.gen_opparam_ptr++ = mem_index;
}
static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2,
TCGv_i32 arg3, TCGv_i32 arg4)
{
@@ -361,6 +343,21 @@ static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 arg1,
*tcg_ctx.gen_opparam_ptr++ = arg6;
}
static inline void tcg_add_param_i32(TCGv_i32 val)
{
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(val);
}
static inline void tcg_add_param_i64(TCGv_i64 val)
{
#if TCG_TARGET_REG_BITS == 32
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(TCGV_LOW(val));
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I32(TCGV_HIGH(val));
#else
*tcg_ctx.gen_opparam_ptr++ = GET_TCGV_I64(val);
#endif
}
static inline void gen_set_label(int n)
{
tcg_gen_op1i(INDEX_op_set_label, n);
@@ -2600,11 +2597,12 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
#define tcg_gen_qemu_ldst_op tcg_gen_op3i_i32
#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i32
#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I32(a, b)
#define tcg_add_param_tl tcg_add_param_i32
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
#define TCGv TCGv_i64
#define tcg_temp_new() tcg_temp_new_i64()
@@ -2612,11 +2610,12 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64
#define tcg_gen_qemu_ldst_op tcg_gen_op3i_i64
#define tcg_gen_qemu_ldst_op_i64 tcg_gen_qemu_ldst_op_i64_i64
#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x)
#define TCGV_EQUAL(a, b) TCGV_EQUAL_I64(a, b)
#define tcg_add_param_tl tcg_add_param_i64
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
#endif
/* debug info: write the PC of the corresponding QEMU CPU instruction */
@@ -2648,197 +2647,67 @@ static inline void tcg_gen_goto_tb(unsigned idx)
tcg_gen_op1i(INDEX_op_goto_tb, idx);
}
#if TCG_TARGET_REG_BITS == 32
void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp);
static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_ld8u, ret, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_ld8u, TCGV_LOW(ret), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
#endif
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
}
static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_ld8s, ret, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_ld8s, TCGV_LOW(ret), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
#endif
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
}
static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_ld16u, ret, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_ld16u, TCGV_LOW(ret), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
#endif
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
}
static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_ld16s, ret, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_ld16s, TCGV_LOW(ret), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
#endif
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
}
static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_ld32, ret, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_ld32, TCGV_LOW(ret), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
#endif
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
}
static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_ld32, ret, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_ld32, TCGV_LOW(ret), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
#endif
tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
}
static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op4i_i32(INDEX_op_qemu_ld64, TCGV_LOW(ret), TCGV_HIGH(ret), addr, mem_index);
#else
tcg_gen_op5i_i32(INDEX_op_qemu_ld64, TCGV_LOW(ret), TCGV_HIGH(ret),
TCGV_LOW(addr), TCGV_HIGH(addr), mem_index);
#endif
tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ);
}
static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_st8, arg, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_st8, TCGV_LOW(arg), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
#endif
tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
}
static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_st16, arg, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_st16, TCGV_LOW(arg), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
#endif
tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
}
static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op3i_i32(INDEX_op_qemu_st32, arg, addr, mem_index);
#else
tcg_gen_op4i_i32(INDEX_op_qemu_st32, TCGV_LOW(arg), TCGV_LOW(addr),
TCGV_HIGH(addr), mem_index);
#endif
tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
}
static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_op4i_i32(INDEX_op_qemu_st64, TCGV_LOW(arg), TCGV_HIGH(arg), addr,
mem_index);
#else
tcg_gen_op5i_i32(INDEX_op_qemu_st64, TCGV_LOW(arg), TCGV_HIGH(arg),
TCGV_LOW(addr), TCGV_HIGH(addr), mem_index);
#endif
tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ);
}
#define tcg_gen_ld_ptr(R, A, O) tcg_gen_ld_i32(TCGV_PTR_TO_NAT(R), (A), (O))
#define tcg_gen_discard_ptr(A) tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A))
#else /* TCG_TARGET_REG_BITS == 32 */
static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld8u, ret, addr, mem_index);
}
static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld8s, ret, addr, mem_index);
}
static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld16u, ret, addr, mem_index);
}
static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld16s, ret, addr, mem_index);
}
static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32, ret, addr, mem_index);
#else
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32u, ret, addr, mem_index);
#endif
}
static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
{
#if TARGET_LONG_BITS == 32
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32, ret, addr, mem_index);
#else
tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32s, ret, addr, mem_index);
#endif
}
static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_ld64, ret, addr, mem_index);
}
static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_st8, arg, addr, mem_index);
}
static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_st16, arg, addr, mem_index);
}
static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op(INDEX_op_qemu_st32, arg, addr, mem_index);
}
static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
tcg_gen_qemu_ldst_op_i64(INDEX_op_qemu_st64, arg, addr, mem_index);
}
#define tcg_gen_ld_ptr(R, A, O) tcg_gen_ld_i64(TCGV_PTR_TO_NAT(R), (A), (O))
#define tcg_gen_discard_ptr(A) tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A))
#endif /* TCG_TARGET_REG_BITS != 32 */
#if TARGET_LONG_BITS == 64
#define tcg_gen_movi_tl tcg_gen_movi_i64
#define tcg_gen_mov_tl tcg_gen_mov_i64
@@ -2997,17 +2866,25 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
#endif
#if TCG_TARGET_REG_BITS == 32
#define tcg_gen_add_ptr(R, A, B) tcg_gen_add_i32(TCGV_PTR_TO_NAT(R), \
TCGV_PTR_TO_NAT(A), \
TCGV_PTR_TO_NAT(B))
#define tcg_gen_addi_ptr(R, A, B) tcg_gen_addi_i32(TCGV_PTR_TO_NAT(R), \
TCGV_PTR_TO_NAT(A), (B))
#define tcg_gen_ext_i32_ptr(R, A) tcg_gen_mov_i32(TCGV_PTR_TO_NAT(R), (A))
#else /* TCG_TARGET_REG_BITS == 32 */
#define tcg_gen_add_ptr(R, A, B) tcg_gen_add_i64(TCGV_PTR_TO_NAT(R), \
TCGV_PTR_TO_NAT(A), \
TCGV_PTR_TO_NAT(B))
#define tcg_gen_addi_ptr(R, A, B) tcg_gen_addi_i64(TCGV_PTR_TO_NAT(R), \
TCGV_PTR_TO_NAT(A), (B))
#define tcg_gen_ext_i32_ptr(R, A) tcg_gen_ext_i32_i64(TCGV_PTR_TO_NAT(R), (A))
#endif /* TCG_TARGET_REG_BITS != 32 */
# define tcg_gen_ld_ptr(R, A, O) \
tcg_gen_ld_i32(TCGV_PTR_TO_NAT(R), (A), (O))
# define tcg_gen_discard_ptr(A) \
tcg_gen_discard_i32(TCGV_PTR_TO_NAT(A))
# define tcg_gen_add_ptr(R, A, B) \
tcg_gen_add_i32(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B))
# define tcg_gen_addi_ptr(R, A, B) \
tcg_gen_addi_i32(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B))
# define tcg_gen_ext_i32_ptr(R, A) \
tcg_gen_mov_i32(TCGV_PTR_TO_NAT(R), (A))
#else
# define tcg_gen_ld_ptr(R, A, O) \
tcg_gen_ld_i64(TCGV_PTR_TO_NAT(R), (A), (O))
# define tcg_gen_discard_ptr(A) \
tcg_gen_discard_i64(TCGV_PTR_TO_NAT(A))
# define tcg_gen_add_ptr(R, A, B) \
tcg_gen_add_i64(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), TCGV_PTR_TO_NAT(B))
# define tcg_gen_addi_ptr(R, A, B) \
tcg_gen_addi_i64(TCGV_PTR_TO_NAT(R), TCGV_PTR_TO_NAT(A), (B))
# define tcg_gen_ext_i32_ptr(R, A) \
tcg_gen_ext_i32_i64(TCGV_PTR_TO_NAT(R), (A))
#endif /* TCG_TARGET_REG_BITS == 32 */

tcg/tcg-opc.h

@@ -180,79 +180,107 @@ DEF(debug_insn_start, 0, 0, 1, TCG_OPF_NOT_PRESENT)
#endif
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END)
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
/* Note: even if TARGET_LONG_BITS is not defined, the INDEX_op
constants must be defined */
#define IMPL_NEW_LDST \
(TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
| IMPL(TCG_TARGET_HAS_new_ldst))
#if TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
DEF(qemu_ld_i32, 1, 1, 2, IMPL_NEW_LDST)
DEF(qemu_st_i32, 0, 2, 2, IMPL_NEW_LDST)
# if TCG_TARGET_REG_BITS == 64
DEF(qemu_ld_i64, 1, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
DEF(qemu_st_i64, 0, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
# else
DEF(qemu_ld_i64, 2, 1, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
DEF(qemu_st_i64, 0, 3, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
# endif
#else
DEF(qemu_ld_i32, 1, 2, 2, IMPL_NEW_LDST)
DEF(qemu_st_i32, 0, 3, 2, IMPL_NEW_LDST)
DEF(qemu_ld_i64, 2, 2, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
DEF(qemu_st_i64, 0, 4, 2, IMPL_NEW_LDST | TCG_OPF_64BIT)
#endif
#undef IMPL_NEW_LDST
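To read one of these entries: with a 64-bit guest on a 32-bit host (TARGET_LONG_BITS > TCG_TARGET_REG_BITS), DEF(qemu_ld_i64, 2, 2, 2, ...) declares two output registers (data low/high), two inputs (address low/high), and two constant arguments (the TCGMemOp and the MMU index).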
#define IMPL_OLD_LDST \
(TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS \
| IMPL(!TCG_TARGET_HAS_new_ldst))
#if TCG_TARGET_REG_BITS == 32
#if TARGET_LONG_BITS == 32
DEF(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST)
#else
DEF(qemu_ld8u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld8u, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST)
#else
DEF(qemu_ld8s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld8s, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST)
#else
DEF(qemu_ld16u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld16u, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST)
#else
DEF(qemu_ld16s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld16s, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_ld32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST)
#else
DEF(qemu_ld32, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld32, 1, 2, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_ld64, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld64, 2, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#else
DEF(qemu_ld64, 2, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld64, 2, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST)
#else
DEF(qemu_st8, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st8, 0, 3, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST)
#else
DEF(qemu_st16, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st16, 0, 3, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST)
#else
DEF(qemu_st32, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st32, 0, 3, 1, IMPL_OLD_LDST)
#endif
#if TARGET_LONG_BITS == 32
DEF(qemu_st64, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st64, 0, 3, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#else
DEF(qemu_st64, 0, 4, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st64, 0, 4, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#endif
#else /* TCG_TARGET_REG_BITS == 32 */
DEF(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld64, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_ld8u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld8s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld16u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld16s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld32, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld32u, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld32s, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_ld64, 1, 1, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st64, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
DEF(qemu_st8, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_st16, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_st32, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
DEF(qemu_st64, 0, 2, 1, IMPL_OLD_LDST | TCG_OPF_64BIT)
#endif /* TCG_TARGET_REG_BITS != 32 */
#undef IMPL_OLD_LDST
#undef IMPL
#undef IMPL64
#undef DEF

tcg/tcg.c

@ -103,6 +103,9 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
intptr_t arg2);
static int tcg_target_const_match(tcg_target_long val,
const TCGArgConstraint *arg_ct);
static void tcg_out_tb_init(TCGContext *s);
static void tcg_out_tb_finalize(TCGContext *s);
TCGOpDef tcg_op_defs[] = {
#define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
@ -254,12 +257,41 @@ void tcg_pool_reset(TCGContext *s)
s->pool_current = NULL;
}
#include "helper.h"
typedef struct TCGHelperInfo {
void *func;
const char *name;
} TCGHelperInfo;
static const TCGHelperInfo all_helpers[] = {
#define GEN_HELPER 2
#include "helper.h"
/* Include tcg-runtime.c functions. */
{ tcg_helper_div_i32, "div_i32" },
{ tcg_helper_rem_i32, "rem_i32" },
{ tcg_helper_divu_i32, "divu_i32" },
{ tcg_helper_remu_i32, "remu_i32" },
{ tcg_helper_shl_i64, "shl_i64" },
{ tcg_helper_shr_i64, "shr_i64" },
{ tcg_helper_sar_i64, "sar_i64" },
{ tcg_helper_div_i64, "div_i64" },
{ tcg_helper_rem_i64, "rem_i64" },
{ tcg_helper_divu_i64, "divu_i64" },
{ tcg_helper_remu_i64, "remu_i64" },
{ tcg_helper_mulsh_i64, "mulsh_i64" },
{ tcg_helper_muluh_i64, "muluh_i64" },
};
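The tcg-runtime.c entries are listed by hand because those helpers are emitted by common TCG code rather than declared in a target's helper.h, so the GEN_HELPER expansion above cannot collect them.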
void tcg_context_init(TCGContext *s)
{
int op, total_args, n;
int op, total_args, n, i;
TCGOpDef *def;
TCGArgConstraint *args_ct;
int *sorted_args;
GHashTable *helper_table;
memset(s, 0, sizeof(*s));
s->nb_globals = 0;
@ -284,7 +316,16 @@ void tcg_context_init(TCGContext *s)
sorted_args += n;
args_ct += n;
}
/* Register helpers. */
/* Use g_direct_hash/equal for direct pointer comparisons on func. */
s->helpers = helper_table = g_hash_table_new(NULL, NULL);
for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
(gpointer)all_helpers[i].name);
}
tcg_target_init(s);
}
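Passing NULL for both callbacks to g_hash_table_new() selects g_direct_hash/g_direct_equal, i.e. keys hash and compare as raw pointer values, which suits function addresses. A minimal self-contained sketch of that behaviour (plain GLib, not QEMU code):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        GHashTable *t = g_hash_table_new(NULL, NULL);  /* direct-pointer keys */

        static int helper_stub;       /* stands in for a helper's address */
        g_hash_table_insert(t, &helper_stub, (gpointer)"helper_stub");

        const char *name = g_hash_table_lookup(t, &helper_stub);
        printf("%s\n", name ? name : "<unknown>");     /* prints: helper_stub */

        g_hash_table_destroy(t);
        return 0;
    }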
@ -332,13 +373,7 @@ void tcg_func_start(TCGContext *s)
s->gen_opc_ptr = s->gen_opc_buf;
s->gen_opparam_ptr = s->gen_opparam_buf;
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Initialize qemu_ld/st labels to assist code generation for TLB miss
   cases at the end of the TB */
s->qemu_ldst_labels = tcg_malloc(sizeof(TCGLabelQemuLdst) *
TCG_MAX_QEMU_LDST);
s->nb_qemu_ldst_labels = 0;
#endif
s->be = tcg_malloc(sizeof(TCGBackendData));
}
static inline void tcg_temp_alloc(TCGContext *s, int n)
@ -620,25 +655,6 @@ int tcg_check_temp_count(void)
}
#endif
void tcg_register_helper(void *func, const char *name)
{
TCGContext *s = &tcg_ctx;
int n;
if ((s->nb_helpers + 1) > s->allocated_helpers) {
n = s->allocated_helpers;
if (n == 0) {
n = 4;
} else {
n *= 2;
}
s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
s->allocated_helpers = n;
}
s->helpers[s->nb_helpers].func = (uintptr_t)func;
s->helpers[s->nb_helpers].name = name;
s->nb_helpers++;
}
/* Note: we convert the 64 bit args to 32 bit and do some alignment
and endian swap. Maybe it would be better to do the alignment
and endian swap in tcg_reg_alloc_call(). */
@ -795,6 +811,188 @@ void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
}
#endif
static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
{
switch (op & MO_SIZE) {
case MO_8:
op &= ~MO_BSWAP;
break;
case MO_16:
break;
case MO_32:
if (!is64) {
op &= ~MO_SIGN;
}
break;
case MO_64:
if (!is64) {
tcg_abort();
}
break;
}
if (st) {
op &= ~MO_SIGN;
}
return op;
}
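The effect of the canonicalization is easiest to see on concrete values. A self-contained mirror of the function above, using the MO_* bit values from tcg/tcg.h (the MO_64-on-32-bit abort path is omitted):

    #include <assert.h>

    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_SIZE = 3,
           MO_SIGN = 4, MO_BSWAP = 8 };

    static int canonicalize(int op, int is64, int st)
    {
        if ((op & MO_SIZE) == MO_8) {
            op &= ~MO_BSWAP;     /* a single byte has no endianness */
        } else if ((op & MO_SIZE) == MO_32 && !is64) {
            op &= ~MO_SIGN;      /* value fills the 32-bit reg: extension is moot */
        }
        if (st) {
            op &= ~MO_SIGN;      /* stores never extend */
        }
        return op;
    }

    int main(void)
    {
        assert(canonicalize(MO_BSWAP | MO_8, 0, 0) == MO_8);
        assert(canonicalize(MO_SIGN | MO_32, 0, 0) == MO_32);  /* i32 ld32s */
        assert(canonicalize(MO_SIGN | MO_16, 1, 1) == MO_16);  /* st16 drops sign */
        return 0;
    }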
static const TCGOpcode old_ld_opc[8] = {
[MO_UB] = INDEX_op_qemu_ld8u,
[MO_SB] = INDEX_op_qemu_ld8s,
[MO_UW] = INDEX_op_qemu_ld16u,
[MO_SW] = INDEX_op_qemu_ld16s,
#if TCG_TARGET_REG_BITS == 32
[MO_UL] = INDEX_op_qemu_ld32,
[MO_SL] = INDEX_op_qemu_ld32,
#else
[MO_UL] = INDEX_op_qemu_ld32u,
[MO_SL] = INDEX_op_qemu_ld32s,
#endif
[MO_Q] = INDEX_op_qemu_ld64,
};
static const TCGOpcode old_st_opc[4] = {
[MO_UB] = INDEX_op_qemu_st8,
[MO_UW] = INDEX_op_qemu_st16,
[MO_UL] = INDEX_op_qemu_st32,
[MO_Q] = INDEX_op_qemu_st64,
};
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
memop = tcg_canonicalize_memop(memop, 0, 0);
if (TCG_TARGET_HAS_new_ldst) {
*tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i32;
tcg_add_param_i32(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = memop;
*tcg_ctx.gen_opparam_ptr++ = idx;
return;
}
/* The old opcodes only support target-endian memory operations. */
assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
assert(old_ld_opc[memop & MO_SSIZE] != 0);
if (TCG_TARGET_REG_BITS == 32) {
*tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
tcg_add_param_i32(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = idx;
} else {
TCGv_i64 val64 = tcg_temp_new_i64();
*tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
tcg_add_param_i64(val64);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = idx;
tcg_gen_trunc_i64_i32(val, val64);
tcg_temp_free_i64(val64);
}
}
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
memop = tcg_canonicalize_memop(memop, 0, 1);
if (TCG_TARGET_HAS_new_ldst) {
*tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i32;
tcg_add_param_i32(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = memop;
*tcg_ctx.gen_opparam_ptr++ = idx;
return;
}
/* The old opcodes only support target-endian memory operations. */
assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
assert(old_st_opc[memop & MO_SIZE] != 0);
if (TCG_TARGET_REG_BITS == 32) {
*tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
tcg_add_param_i32(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = idx;
} else {
TCGv_i64 val64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(val64, val);
*tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
tcg_add_param_i64(val64);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = idx;
tcg_temp_free_i64(val64);
}
}
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
memop = tcg_canonicalize_memop(memop, 1, 0);
#if TCG_TARGET_REG_BITS == 32
if ((memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
if (memop & MO_SIGN) {
tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
} else {
tcg_gen_movi_i32(TCGV_HIGH(val), 0);
}
return;
}
#endif
if (TCG_TARGET_HAS_new_ldst) {
*tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_ld_i64;
tcg_add_param_i64(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = memop;
*tcg_ctx.gen_opparam_ptr++ = idx;
return;
}
/* The old opcodes only support target-endian memory operations. */
assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
assert(old_ld_opc[memop & MO_SSIZE] != 0);
*tcg_ctx.gen_opc_ptr++ = old_ld_opc[memop & MO_SSIZE];
tcg_add_param_i64(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = idx;
}
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
memop = tcg_canonicalize_memop(memop, 1, 1);
#if TCG_TARGET_REG_BITS == 32
if ((memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
return;
}
#endif
if (TCG_TARGET_HAS_new_ldst) {
*tcg_ctx.gen_opc_ptr++ = INDEX_op_qemu_st_i64;
tcg_add_param_i64(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = memop;
*tcg_ctx.gen_opparam_ptr++ = idx;
return;
}
/* The old opcodes only support target-endian memory operations. */
assert((memop & MO_BSWAP) == MO_TE || (memop & MO_SIZE) == MO_8);
assert(old_st_opc[memop & MO_SIZE] != 0);
*tcg_ctx.gen_opc_ptr++ = old_st_opc[memop & MO_SIZE];
tcg_add_param_i64(val);
tcg_add_param_tl(addr);
*tcg_ctx.gen_opparam_ptr++ = idx;
}
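With the new interface the access width, signedness, and endianness all travel in the TCGMemOp constant, so a front end needs one call per access rather than one opcode per flavour; a hypothetical translator fragment (val, val64, addr, and mem_idx are assumed to exist):

    tcg_gen_qemu_ld_i32(val, addr, mem_idx, MO_TESW);  /* sign-extending 16-bit
                                                          target-endian load */
    tcg_gen_qemu_st_i64(val64, addr, mem_idx, MO_TEQ); /* 64-bit target-endian
                                                          store */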
static void tcg_reg_alloc_start(TCGContext *s)
{
@ -851,47 +1049,14 @@ char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
}
static int helper_cmp(const void *p1, const void *p2)
/* Find helper name. */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
const TCGHelperInfo *th1 = p1;
const TCGHelperInfo *th2 = p2;
if (th1->func < th2->func)
return -1;
else if (th1->func == th2->func)
return 0;
else
return 1;
}
/* find helper definition (Note: A hash table would be better) */
static TCGHelperInfo *tcg_find_helper(TCGContext *s, uintptr_t val)
{
int m, m_min, m_max;
TCGHelperInfo *th;
uintptr_t v;
if (unlikely(!s->helpers_sorted)) {
qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
helper_cmp);
s->helpers_sorted = 1;
const char *ret = NULL;
if (s->helpers) {
ret = g_hash_table_lookup(s->helpers, (gpointer)val);
}
/* binary search */
m_min = 0;
m_max = s->nb_helpers - 1;
while (m_min <= m_max) {
m = (m_min + m_max) >> 1;
th = &s->helpers[m];
v = th->func;
if (v == val)
return th;
else if (val < v) {
m_max = m - 1;
} else {
m_min = m + 1;
}
}
return NULL;
return ret;
}
static const char * const cond_name[] =
@ -910,6 +1075,22 @@ static const char * const cond_name[] =
[TCG_COND_GTU] = "gtu"
};
static const char * const ldst_name[] =
{
[MO_UB] = "ub",
[MO_SB] = "sb",
[MO_LEUW] = "leuw",
[MO_LESW] = "lesw",
[MO_LEUL] = "leul",
[MO_LESL] = "lesl",
[MO_LEQ] = "leq",
[MO_BEUW] = "beuw",
[MO_BESW] = "besw",
[MO_BEUL] = "beul",
[MO_BESL] = "besl",
[MO_BEQ] = "beq",
};
void tcg_dump_ops(TCGContext *s)
{
const uint16_t *opc_ptr;
@ -976,7 +1157,7 @@ void tcg_dump_ops(TCGContext *s)
}
} else if (c == INDEX_op_movi_i32 || c == INDEX_op_movi_i64) {
tcg_target_ulong val;
TCGHelperInfo *th;
const char *name;
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
@ -984,9 +1165,9 @@ void tcg_dump_ops(TCGContext *s)
qemu_log(" %s %s,$", def->name,
tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
val = args[1];
th = tcg_find_helper(s, val);
if (th) {
qemu_log("%s", th->name);
name = tcg_find_helper(s, val);
if (name) {
qemu_log("%s", name);
} else {
if (c == INDEX_op_movi_i32) {
qemu_log("0x%x", (uint32_t)val);
@ -1038,6 +1219,17 @@ void tcg_dump_ops(TCGContext *s)
}
i = 1;
break;
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_ld_i64:
case INDEX_op_qemu_st_i64:
if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) {
qemu_log(",%s", ldst_name[args[k++]]);
} else {
qemu_log(",$0x%" TCG_PRIlx, args[k++]);
}
i = 1;
break;
default:
i = 0;
break;
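With the table above, the memop constant of a new-style op is printed symbolically; a dump line might look roughly like (register names illustrative):

    qemu_ld_i32 tmp2,tmp3,leul,$0x1

where "leul" decodes the TCGMemOp and $0x1 is the MMU index.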
@ -2311,6 +2503,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
s->code_buf = gen_code_buf;
s->code_ptr = gen_code_buf;
tcg_out_tb_init(s);
args = s->gen_opparam_buf;
op_index = 0;
@ -2384,10 +2578,8 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
#endif
}
the_end:
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Generate TB finalization at the end of block */
tcg_out_tb_finalize(s);
#endif
return -1;
}

tcg/tcg.h

@ -197,6 +197,60 @@ typedef enum TCGType {
#endif
} TCGType;
/* Constants for qemu_ld and qemu_st for the Memory Operation field. */
typedef enum TCGMemOp {
MO_8 = 0,
MO_16 = 1,
MO_32 = 2,
MO_64 = 3,
MO_SIZE = 3, /* Mask for the above. */
MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
MO_BSWAP = 8, /* Host reverse endian. */
#ifdef HOST_WORDS_BIGENDIAN
MO_LE = MO_BSWAP,
MO_BE = 0,
#else
MO_LE = 0,
MO_BE = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
MO_TE = MO_BE,
#else
MO_TE = MO_LE,
#endif
/* Combinations of the above, for ease of use. */
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
MO_Q = MO_64,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
MO_LEQ = MO_LE | MO_Q,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
MO_BEQ = MO_BE | MO_Q,
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
MO_TEQ = MO_TE | MO_Q,
MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
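The enum is a small bitfield: two size bits, one sign bit, one swap bit, with MO_LE/MO_BE expressed relative to the host so that MO_BSWAP means "opposite of host order". A self-contained illustration, assuming a little-endian host (HOST_WORDS_BIGENDIAN undefined):

    #include <stdio.h>

    enum { MO_16 = 1, MO_SIGN = 4, MO_BSWAP = 8,
           MO_LE = 0, MO_BE = MO_BSWAP };    /* little-endian host */

    int main(void)
    {
        /* A big-endian signed 16-bit access needs a swap on this host... */
        printf("MO_BESW = %d, swap %s\n", MO_BE | MO_SIGN | MO_16,
               (MO_BE & MO_BSWAP) ? "set" : "clear");
        /* ...while the little-endian flavour matches host order and is free. */
        printf("MO_LESW = %d, swap %s\n", MO_LE | MO_SIGN | MO_16,
               (MO_LE & MO_BSWAP) ? "set" : "clear");
        return 0;
    }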
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables. Using a struct is
@ -211,24 +265,6 @@ typedef tcg_target_ulong TCGArg;
are aliases for target_ulong and host pointer sized values respectively.
*/
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Macros/structures for qemu_ld/st IR code optimization:
TCG_MAX_HELPER_LABELS is defined to match OPC_BUF_SIZE in exec-all.h. */
#define TCG_MAX_QEMU_LDST 640
typedef struct TCGLabelQemuLdst {
int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */
int opc:4;
int addrlo_reg; /* reg index for low word of guest virtual addr */
int addrhi_reg; /* reg index for high word of guest virtual addr */
int datalo_reg; /* reg index for low word to be loaded or stored */
int datahi_reg; /* reg index for high word to be loaded or stored */
int mem_index; /* soft MMU memory index */
uint8_t *raddr; /* gen code addr of the next IR of qemu_ld/st IR */
uint8_t *label_ptr[2]; /* label pointers to be updated */
} TCGLabelQemuLdst;
#endif
#ifdef CONFIG_DEBUG_TCG
#define DEBUG_TCGV 1
#endif
@ -405,11 +441,6 @@ typedef struct TCGTemp {
const char *name;
} TCGTemp;
typedef struct TCGHelperInfo {
uintptr_t func;
const char *name;
} TCGHelperInfo;
typedef struct TCGContext TCGContext;
struct TCGContext {
@ -447,10 +478,7 @@ struct TCGContext {
uint8_t *code_ptr;
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
TCGHelperInfo *helpers;
int nb_helpers;
int allocated_helpers;
int helpers_sorted;
GHashTable *helpers;
#ifdef CONFIG_PROFILER
/* profiling info */
@ -496,12 +524,8 @@ struct TCGContext {
TBContext tb_ctx;
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Label info for qemu_ld/st IRs; the labels help to generate TLB-miss
   handling code at the end of the TB */
TCGLabelQemuLdst *qemu_ldst_labels;
int nb_qemu_ldst_labels;
#endif
/* The TCGBackendData structure is private to tcg-target.c. */
struct TCGBackendData *be;
};
extern TCGContext tcg_ctx;
@ -680,8 +704,6 @@ TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr, TCGArg *args,
TCGOpDef *tcg_op_def);
/* only used for debugging purposes */
void tcg_register_helper(void *func, const char *name);
const char *tcg_helper_get_name(TCGContext *s, void *func);
void tcg_dump_ops(TCGContext *s);
void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
@ -745,11 +767,6 @@ TCGv_i64 tcg_const_local_i64(int64_t val);
void tcg_register_jit(void *buf, size_t buf_size);
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Generate TB finalization at the end of block */
void tcg_out_tb_finalize(TCGContext *s);
#endif
/*
* Memory helpers that will be used by TCG generated code.
*/
@ -757,29 +774,66 @@ void tcg_out_tb_finalize(TCGContext *s);
/* Value zero-extended to tcg register size. */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_ldsl_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
int mmu_idx, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
int mmu_idx, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
# define helper_ret_lduw_mmu helper_be_lduw_mmu
# define helper_ret_ldsl_mmu helper_be_ldsl_mmu
# define helper_ret_ldul_mmu helper_be_ldul_mmu
# define helper_ret_ldq_mmu helper_be_ldq_mmu
# define helper_ret_stw_mmu helper_be_stw_mmu
# define helper_ret_stl_mmu helper_be_stl_mmu
# define helper_ret_stq_mmu helper_be_stq_mmu
#else
# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
# define helper_ret_lduw_mmu helper_le_lduw_mmu
# define helper_ret_ldsl_mmu helper_le_ldsl_mmu
# define helper_ret_ldul_mmu helper_le_ldul_mmu
# define helper_ret_ldq_mmu helper_le_ldq_mmu
# define helper_ret_stw_mmu helper_le_stw_mmu
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
#endif
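A backend's TLB-miss slow path ends up calling one of these helpers directly; schematically (a sketch, not code from this patch), a little-endian 32-bit load miss performs

    val = helper_le_ldul_mmu(env, addr, mmu_idx, (uintptr_t)raddr);

where raddr is the return address into the translated code, which lets the helper unwind to the faulting guest instruction if the access traps.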
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);

tcg/tci/tcg-target.c

@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include "tcg-be-null.h"
/* TODO list:
* - See TODO comments in code.
*/

tcg/tci/tcg-target.h

@ -120,6 +120,8 @@
#define TCG_TARGET_HAS_mulsh_i64 0
#endif /* TCG_TARGET_REG_BITS == 64 */
#define TCG_TARGET_HAS_new_ldst 0
/* Number of registers available.
For 32 bit hosts, we need more than 8 registers (call arguments). */
/* #define TCG_TARGET_NB_REGS 8 */

translate-all.c

@ -1318,18 +1318,6 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
mmap_unlock();
}
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* Check whether the given addr is within the TCG-generated code buffer */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
/* This can be called during code generation; code_gen_buffer_size
   is used instead of code_gen_ptr for the upper-boundary check */
return (tc_ptr >= (uintptr_t)tcg_ctx.code_gen_buffer &&
tc_ptr < (uintptr_t)(tcg_ctx.code_gen_buffer +
tcg_ctx.code_gen_buffer_size));
}
#endif
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)