exec: Add both big- and little-endian memory helpers

Step three in the transition: add helpers that are not tied to the
target "default" endianness, to be used when the guest performs a
memory operation with non-default endianness.

Signed-off-by: Richard Henderson <rth@twiddle.net>
Richard Henderson 2013-09-04 11:45:20 -07:00
parent f713d6ad7b
commit 867b3201a3
2 changed files with 306 additions and 53 deletions

include/exec/softmmu_template.h

@@ -70,6 +70,48 @@
 #define ADDR_READ addr_read
 #endif
 
+#if DATA_SIZE == 8
+# define BSWAP(X)  bswap64(X)
+#elif DATA_SIZE == 4
+# define BSWAP(X)  bswap32(X)
+#elif DATA_SIZE == 2
+# define BSWAP(X)  bswap16(X)
+#else
+# define BSWAP(X)  (X)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define TGT_BE(X)  (X)
+# define TGT_LE(X)  BSWAP(X)
+#else
+# define TGT_BE(X)  BSWAP(X)
+# define TGT_LE(X)  (X)
+#endif
+
+#if DATA_SIZE == 1
+# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name  helper_le_ld_name
+# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name helper_le_lds_name
+# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name  helper_le_st_name
+#else
+# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
+# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
+# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
+# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
+# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
+# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_te_ld_name  helper_be_ld_name
+# define helper_te_st_name  helper_be_st_name
+#else
+# define helper_te_ld_name  helper_le_ld_name
+# define helper_te_st_name  helper_le_st_name
+#endif
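
To make the macro block above concrete, here is a standalone sketch (not QEMU code; the DATA_SIZE == 4 instantiation and a TARGET_WORDS_BIGENDIAN build are assumed, with a compiler builtin standing in for QEMU's bswap32()):

#include <stdint.h>
#include <stdio.h>

/* What the template reduces to for DATA_SIZE == 4 on a big-endian target. */
#define BSWAP(X)  __builtin_bswap32(X)
#define TGT_BE(X) (X)        /* target order is already big-endian */
#define TGT_LE(X) BSWAP(X)   /* little-endian access must byte-swap */

int main(void)
{
    uint32_t val = 0x11223344;
    printf("TGT_BE: %08x\n", (unsigned)TGT_BE(val)); /* 11223344 */
    printf("TGT_LE: %08x\n", (unsigned)TGT_LE(val)); /* 44332211 */
    return 0;
}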
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               hwaddr physaddr,
                                               target_ulong addr,
@@ -89,18 +131,16 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
     return val;
 }
 
-/* handle all cases except unaligned access which span two pages */
 #ifdef SOFTMMU_CODE_ACCESS
-static
+static __attribute__((unused))
 #endif
-WORD_TYPE
-glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                              target_ulong addr, int mmu_idx,
-                                              uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t retaddr)
 {
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     uintptr_t haddr;
+    DATA_TYPE res;
 
     /* Adjust the given return address.  */
     retaddr -= GETPC_ADJ;
@@ -124,7 +164,12 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
             goto do_unaligned_access;
         }
         ioaddr = env->iotlb[mmu_idx][index];
-        return glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = TGT_LE(res);
+        return res;
     }
 
     /* Handle slow unaligned access (it spans two pages or IO).  */
@@ -132,7 +177,7 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
         target_ulong addr1, addr2;
-        DATA_TYPE res1, res2, res;
+        DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
@@ -142,16 +187,12 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
            Undo that for the recursion.  */
-        res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-            (env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-            (env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
         shift = (addr & (DATA_SIZE - 1)) * 8;
-#ifdef TARGET_WORDS_BIGENDIAN
-        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
-#else
+
+        /* Little-endian combine.  */
         res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
-#endif
         return res;
     }
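
As a worked example of the little-endian combine, this standalone sketch (not QEMU code) performs a hypothetical 4-byte load at page-crossing offset 2; the memcpy loads assume a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* An unaligned 4-byte LE load at address 2 should see bytes
       02 03 04 05, i.e. the value 0x05040302. */
    uint8_t mem[8] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
    uint32_t res1, res2, res;
    unsigned shift;

    /* Two naturally aligned 4-byte loads, as the helper performs. */
    memcpy(&res1, mem + 0, 4);   /* 0x03020100 on a little-endian host */
    memcpy(&res2, mem + 4, 4);   /* 0x07060504 */

    shift = (2 & 3) * 8;         /* addr & (DATA_SIZE - 1), here 16 */
    res = (res1 >> shift) | (res2 << (32 - shift));

    printf("%08x\n", (unsigned)res);   /* prints 05040302 */
    return 0;
}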
@@ -163,16 +204,98 @@ glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
 #endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    /* Note that ldl_raw is defined with type "int".  */
-    return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
+#if DATA_SIZE == 1
+    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
+#else
+    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
+#endif
+    return res;
 }
+
+#if DATA_SIZE > 1
+#ifdef SOFTMMU_CODE_ACCESS
+static __attribute__((unused))
+#endif
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
+                            uintptr_t retaddr)
+{
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    uintptr_t haddr;
+    DATA_TYPE res;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+#ifdef ALIGNED_ONLY
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        }
+#endif
+        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        hwaddr ioaddr;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        ioaddr = env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
+        res = TGT_BE(res);
+        return res;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        target_ulong addr1, addr2;
+        DATA_TYPE res1, res2;
+        unsigned shift;
+    do_unaligned_access:
+#ifdef ALIGNED_ONLY
+        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+#endif
+        addr1 = addr & ~(DATA_SIZE - 1);
+        addr2 = addr1 + DATA_SIZE;
+
+        /* Note the adjustment at the beginning of the function.
+           Undo that for the recursion.  */
+        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
+        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        shift = (addr & (DATA_SIZE - 1)) * 8;
+
+        /* Big-endian combine.  */
+        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
+        return res;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+#ifdef ALIGNED_ONLY
+    if ((addr & (DATA_SIZE - 1)) != 0) {
+        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+    }
+#endif
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
+    return res;
+}
+#endif /* DATA_SIZE > 1 */
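
The big-endian variant mirrors the combine with the shift directions swapped. The same hypothetical access as in the earlier sketch (not QEMU code), with the aligned loads assembled by hand so the demo does not depend on host endianness:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* An unaligned 4-byte BE load at address 2 should see bytes
       02 03 04 05, i.e. the value 0x02030405. */
    uint8_t mem[8] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};

    /* Two naturally aligned 4-byte big-endian loads. */
    uint32_t res1 = (uint32_t)mem[0] << 24 | mem[1] << 16 | mem[2] << 8 | mem[3];
    uint32_t res2 = (uint32_t)mem[4] << 24 | mem[5] << 16 | mem[6] << 8 | mem[7];

    unsigned shift = (2 & 3) * 8;   /* 16 */
    uint32_t res = (res1 << shift) | (res2 >> (32 - shift));

    printf("%08x\n", (unsigned)res);   /* prints 02030405 */
    return 0;
}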
 
 DATA_TYPE
 glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
 {
-    return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
-                                                         GETRA());
+    return helper_te_ld_name (env, addr, mmu_idx, GETRA());
 }
 
 #ifndef SOFTMMU_CODE_ACCESS
@@ -180,14 +303,19 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
 /* Provide signed versions of the load routines as well.  We can of course
    avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
 #if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
-WORD_TYPE
-glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                              target_ulong addr, int mmu_idx,
-                                              uintptr_t retaddr)
+WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr)
 {
-    return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
-        (env, addr, mmu_idx, retaddr);
+    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
 }
+
+# if DATA_SIZE > 1
+WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
+                             int mmu_idx, uintptr_t retaddr)
+{
+    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
+}
+# endif
 #endif
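
The signed variants only cast the zero-extended result to the signed data type; the implicit conversion to the wider return type then sign-extends it to register width. A standalone sketch (not QEMU code; DATA_SIZE == 2 and a 64-bit tcg register are assumed):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t loaded = 0x8000;              /* value the unsigned helper returned */
    uint64_t zero_ext = loaded;            /* plain widening: 0x0000000000008000 */
    uint64_t sign_ext = (int16_t)loaded;   /* the (SDATA_TYPE) cast, then widening */

    printf("zero-extended: %016" PRIx64 "\n", zero_ext);
    printf("sign-extended: %016" PRIx64 "\n", sign_ext);  /* ffffffffffff8000 */
    return 0;
}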
 
 static inline void glue(io_write, SUFFIX)(CPUArchState *env,
@@ -208,10 +336,8 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     io_mem_write(mr, physaddr, val, 1 << SHIFT);
 }
 
-void
-glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
-                                             target_ulong addr, DATA_TYPE val,
-                                             int mmu_idx, uintptr_t retaddr)
+void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       int mmu_idx, uintptr_t retaddr)
 {
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
@@ -239,6 +365,10 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             goto do_unaligned_access;
         }
         ioaddr = env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_LE(val);
         glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
         return;
     }
@@ -256,11 +386,8 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache.  */
         for (i = DATA_SIZE - 1; i >= 0; i--) {
-#ifdef TARGET_WORDS_BIGENDIAN
-            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-#else
+            /* Little-endian extract.  */
             uint8_t val8 = val >> (i * 8);
-#endif
             /* Note the adjustment at the beginning of the function.
                Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
@@ -277,15 +404,91 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
 #endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
-    glue(glue(st, SUFFIX), _raw)((uint8_t *)haddr, val);
+#if DATA_SIZE == 1
+    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
+#else
+    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
+#endif
 }
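
The byte-by-byte slow path peels the value apart with the little-endian extract; a standalone sketch (not QEMU code; a local buffer stands in for the recursive byte stores):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t val = 0x11223344;   /* DATA_SIZE == 4 */
    uint8_t mem[4];
    int i;

    for (i = 4 - 1; i >= 0; i--) {
        /* Little-endian extract: byte i of the value lands at addr + i. */
        uint8_t val8 = val >> (i * 8);
        mem[i] = val8;
    }

    printf("%02x %02x %02x %02x\n", mem[0], mem[1], mem[2], mem[3]);
    /* prints 44 33 22 11, the little-endian byte order */
    return 0;
}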
+#if DATA_SIZE > 1
+void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
+                       int mmu_idx, uintptr_t retaddr)
+{
+    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    uintptr_t haddr;
+
+    /* Adjust the given return address.  */
+    retaddr -= GETPC_ADJ;
+
+    /* If the TLB entry is for a different page, reload and try again.  */
+    if ((addr & TARGET_PAGE_MASK)
+        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+#ifdef ALIGNED_ONLY
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+        }
+#endif
+        tlb_fill(env, addr, 1, mmu_idx, retaddr);
+        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
+    }
+
+    /* Handle an IO access.  */
+    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
+        hwaddr ioaddr;
+        if ((addr & (DATA_SIZE - 1)) != 0) {
+            goto do_unaligned_access;
+        }
+        ioaddr = env->iotlb[mmu_idx][index];
+
+        /* ??? Note that the io helpers always read data in the target
+           byte ordering.  We should push the LE/BE request down into io.  */
+        val = TGT_BE(val);
+        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
+        return;
+    }
+
+    /* Handle slow unaligned access (it spans two pages or IO).  */
+    if (DATA_SIZE > 1
+        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
+                    >= TARGET_PAGE_SIZE)) {
+        int i;
+    do_unaligned_access:
+#ifdef ALIGNED_ONLY
+        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+#endif
+        /* XXX: not efficient, but simple */
+        /* Note: relies on the fact that tlb_fill() does not remove the
+         * previous page from the TLB cache.  */
+        for (i = DATA_SIZE - 1; i >= 0; i--) {
+            /* Big-endian extract.  */
+            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
+            /* Note the adjustment at the beginning of the function.
+               Undo that for the recursion.  */
+            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                            mmu_idx, retaddr + GETPC_ADJ);
+        }
+        return;
+    }
+
+    /* Handle aligned access or unaligned access in the same page.  */
+#ifdef ALIGNED_ONLY
+    if ((addr & (DATA_SIZE - 1)) != 0) {
+        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
+    }
+#endif
+
+    haddr = addr + env->tlb_table[mmu_idx][index].addend;
+    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
+}
+#endif /* DATA_SIZE > 1 */
 
 void
 glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          DATA_TYPE val, int mmu_idx)
 {
-    glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
-                                                 GETRA());
+    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
 }
 
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
@@ -301,3 +504,16 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
 #undef SDATA_TYPE
 #undef USUFFIX
 #undef SSUFFIX
+#undef BSWAP
+#undef TGT_BE
+#undef TGT_LE
+#undef CPU_BE
+#undef CPU_LE
+#undef helper_le_ld_name
+#undef helper_be_ld_name
+#undef helper_le_lds_name
+#undef helper_be_lds_name
+#undef helper_le_st_name
+#undef helper_be_st_name
+#undef helper_te_ld_name
+#undef helper_te_st_name

tcg/tcg.h

@@ -774,29 +774,66 @@ void tcg_register_jit(void *buf, size_t buf_size);
 /* Value zero-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                      int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                     int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                     int mmu_idx, uintptr_t retaddr);
-uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
-                            int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
+                           int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
+                           int mmu_idx, uintptr_t retaddr);
 
 /* Value sign-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                      int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                     int mmu_idx, uintptr_t retaddr);
-tcg_target_ulong helper_ret_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                     int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
+tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
+                                    int mmu_idx, uintptr_t retaddr);
 
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                         int mmu_idx, uintptr_t retaddr);
-void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                        int mmu_idx, uintptr_t retaddr);
-void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                        int mmu_idx, uintptr_t retaddr);
-void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                        int mmu_idx, uintptr_t retaddr);
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                       int mmu_idx, uintptr_t retaddr);
+void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                       int mmu_idx, uintptr_t retaddr);
+void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                       int mmu_idx, uintptr_t retaddr);
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+                       int mmu_idx, uintptr_t retaddr);
+void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+                       int mmu_idx, uintptr_t retaddr);
+void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+                       int mmu_idx, uintptr_t retaddr);
+
+/* Temporary aliases until backends are converted.  */
+#ifdef TARGET_WORDS_BIGENDIAN
+# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
+# define helper_ret_lduw_mmu  helper_be_lduw_mmu
+# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
+# define helper_ret_ldul_mmu  helper_be_ldul_mmu
+# define helper_ret_ldq_mmu   helper_be_ldq_mmu
+# define helper_ret_stw_mmu   helper_be_stw_mmu
+# define helper_ret_stl_mmu   helper_be_stl_mmu
+# define helper_ret_stq_mmu   helper_be_stq_mmu
+#else
+# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
+# define helper_ret_lduw_mmu  helper_le_lduw_mmu
+# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
+# define helper_ret_ldul_mmu  helper_le_ldul_mmu
+# define helper_ret_ldq_mmu   helper_le_ldq_mmu
+# define helper_ret_stw_mmu   helper_le_stw_mmu
+# define helper_ret_stl_mmu   helper_le_stl_mmu
+# define helper_ret_stq_mmu   helper_le_stq_mmu
+#endif
 
 uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
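
A minimal sketch (not QEMU code; stub functions and a hard-coded TARGET_WORDS_BIGENDIAN stand in for the real build) of how the temporary aliases keep an unconverted backend working, the old helper_ret_* name resolving at preprocessing time to the endian-specific helper:

#include <stdio.h>

static void helper_be_lduw_mmu(void) { puts("helper_be_lduw_mmu"); }
static void helper_le_lduw_mmu(void) { puts("helper_le_lduw_mmu"); }

#define TARGET_WORDS_BIGENDIAN 1   /* assumed for this demo */

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_lduw_mmu helper_be_lduw_mmu
#else
# define helper_ret_lduw_mmu helper_le_lduw_mmu
#endif

int main(void)
{
    (void)helper_le_lduw_mmu;   /* keep -Wunused-function quiet */

    /* An unconverted backend still calls the old name... */
    helper_ret_lduw_mmu();      /* ...and gets the BE helper here */
    return 0;
}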