Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20200115' into staging

Add cpu_{ld,st}*_mmuidx_ra
Remove MMU_MODE*_SUFFIX
Move tcg headers under include/

# gpg: Signature made Thu 16 Jan 2020 01:36:41 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20200115: (34 commits)
  MAINTAINERS: Replace Claudio Fontana for tcg/aarch64
  configure: Remove tcg/ from the preprocessor include search list
  tcg: Move TCG headers to include/tcg/
  tcg: Search includes in the parent source directory
  tcg: Search includes from the project root source directory
  cputlb: Expand cpu_ldst_template.h in cputlb.c
  cputlb: Remove support for MMU_MODE*_SUFFIX
  target/ppc: Use cpu_*_mmuidx_ra instead of MMU_MODE*_SUFFIX
  target/s390x: Use cpu_*_mmuidx_ra instead of MMU_MODE*_SUFFIX
  target/mips: Use cpu_*_mmuidx_ra instead of MMU_MODE*_SUFFIX
  target/m68k: Use cpu_*_mmuidx_ra instead of MMU_MODE{0,1}_SUFFIX
  target/xtensa: Remove MMU_MODE{0,1,2,3}_SUFFIX
  target/unicore32: Remove MMU_MODE{0,1}_SUFFIX
  target/sh4: Remove MMU_MODE{0,1}_SUFFIX
  target/microblaze: Remove MMU_MODE{0,1,2}_SUFFIX
  target/i386: Remove MMU_MODE{0,1,2}_SUFFIX
  target/cris: Remove MMU_MODE{0,1}_SUFFIX
  target/alpha: Remove MMU_MODE{0,1}_SUFFIX
  target/nios2: Remove MMU_MODE{0,1}_SUFFIX
  cputlb: Expand cpu_ldst_useronly_template.h in user-exec.c
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2020-01-17 12:13:17 +00:00
commit cbf01142b2
90 changed files with 1037 additions and 1240 deletions


@ -2382,6 +2382,7 @@ Common TCG code
M: Richard Henderson <rth@twiddle.net>
S: Maintained
F: tcg/
F: include/tcg/
TCG Plugins
M: Alex Bennée <alex.bennee@linaro.org>
@ -2391,8 +2392,7 @@ F: plugins/
F: tests/plugin
AArch64 TCG target
M: Claudio Fontana <claudio.fontana@huawei.com>
M: Claudio Fontana <claudio.fontana@gmail.com>
M: Richard Henderson <richard.henderson@linaro.org>
S: Maintained
L: qemu-arm@nongnu.org
F: tcg/aarch64/


@ -64,13 +64,10 @@
the ATOMIC_NAME macro, and redefined below. */
#if DATA_SIZE == 1
# define END
# define MEND _be /* either le or be would be fine */
#elif defined(HOST_WORDS_BIGENDIAN)
# define END _be
# define MEND _be
#else
# define END _le
# define MEND _le
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
@ -79,8 +76,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
@ -99,8 +96,8 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
atomic_trace_ld_pre(env, addr, info);
val = atomic16_read(haddr);
@ -114,8 +111,8 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
ATOMIC_MMU_IDX);
atomic_trace_st_pre(env, addr, info);
atomic16_set(haddr, val);
@ -130,8 +127,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
atomic_trace_rmw_pre(env, addr, info);
ret = atomic_xchg__nocheck(haddr, val);
@ -147,10 +144,8 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret; \
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
false, \
ATOMIC_MMU_IDX); \
\
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
ret = atomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
@ -183,10 +178,8 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE cmp, old, new, val = xval; \
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
false, \
ATOMIC_MMU_IDX); \
\
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
smp_mb(); \
cmp = atomic_read__nocheck(haddr); \
@ -213,7 +206,6 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#endif /* DATA SIZE >= 16 */
#undef END
#undef MEND
#if DATA_SIZE > 1
@ -221,10 +213,8 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
within the ATOMIC_NAME macro. */
#ifdef HOST_WORDS_BIGENDIAN
# define END _le
# define MEND _le
#else
# define END _be
# define MEND _be
#endif
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
@ -233,9 +223,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
DATA_TYPE ret;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
false,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
@ -254,9 +243,8 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
ATOMIC_MMU_DECLS;
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
false,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
atomic_trace_ld_pre(env, addr, info);
val = atomic16_read(haddr);
@ -270,9 +258,8 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
true,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
ATOMIC_MMU_IDX);
val = BSWAP(val);
atomic_trace_st_pre(env, addr, info);
@ -289,9 +276,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ATOMIC_MMU_DECLS;
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
ABI_TYPE ret;
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,
false,
ATOMIC_MMU_IDX);
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
atomic_trace_rmw_pre(env, addr, info);
ret = atomic_xchg__nocheck(haddr, BSWAP(val));
@ -307,10 +293,8 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ATOMIC_MMU_DECLS; \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
DATA_TYPE ret; \
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
false, \
ATOMIC_MMU_IDX); \
\
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
false, ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
ret = atomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
@ -341,10 +325,8 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ATOMIC_MMU_DECLS; \
XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, \
false, \
ATOMIC_MMU_IDX); \
\
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
false, ATOMIC_MMU_IDX); \
atomic_trace_rmw_pre(env, addr, info); \
smp_mb(); \
ldn = atomic_read__nocheck(haddr); \
@ -378,7 +360,6 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#endif /* DATA_SIZE >= 16 */
#undef END
#undef MEND
#endif /* DATA_SIZE > 1 */
#undef BSWAP


@ -23,7 +23,7 @@
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"


@ -34,6 +34,8 @@
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
#include "trace-root.h"
#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
@ -1625,6 +1627,137 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
/*
* Load helpers for cpu_ldst.h.
*/
static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t retaddr,
MemOp op, FullLoadHelper *full_load)
{
uint16_t meminfo;
TCGMemOpIdx oi;
uint64_t ret;
meminfo = trace_mem_get_info(op, mmu_idx, false);
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
op &= ~MO_SIGN;
oi = make_memop_idx(op, mmu_idx);
ret = full_load(env, addr, oi, retaddr);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
return ret;
}
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
}
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
full_ldub_mmu);
}
uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUW,
MO_TE == MO_LE
? full_le_lduw_mmu : full_be_lduw_mmu);
}
int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_TESW,
MO_TE == MO_LE
? full_le_lduw_mmu : full_be_lduw_mmu);
}
uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEUL,
MO_TE == MO_LE
? full_le_ldul_mmu : full_be_ldul_mmu);
}
uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_load_helper(env, addr, mmu_idx, ra, MO_TEQ,
MO_TE == MO_LE
? helper_le_ldq_mmu : helper_be_ldq_mmu);
}
uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_lduw_data_ra(CPUArchState *env, target_ulong ptr,
uintptr_t retaddr)
{
return cpu_lduw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
int cpu_ldsw_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldsw_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_ldl_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint64_t cpu_ldq_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
{
return cpu_ldq_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
}
uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldub_data_ra(env, ptr, 0);
}
int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldsb_data_ra(env, ptr, 0);
}
uint32_t cpu_lduw_data(CPUArchState *env, target_ulong ptr)
{
return cpu_lduw_data_ra(env, ptr, 0);
}
int cpu_ldsw_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldsw_data_ra(env, ptr, 0);
}
uint32_t cpu_ldl_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldl_data_ra(env, ptr, 0);
}
uint64_t cpu_ldq_data(CPUArchState *env, target_ulong ptr)
{
return cpu_ldq_data_ra(env, ptr, 0);
}
/*
* Store Helpers
*/
@ -1854,6 +1987,94 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
/*
* Store Helpers for cpu_ldst.h
*/
static inline void QEMU_ALWAYS_INLINE
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr, MemOp op)
{
TCGMemOpIdx oi;
uint16_t meminfo;
meminfo = trace_mem_get_info(op, mmu_idx, true);
trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
oi = make_memop_idx(op, mmu_idx);
store_helper(env, addr, val, oi, retaddr, op);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
}
void cpu_stw_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUW);
}
void cpu_stl_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEUL);
}
void cpu_stq_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr)
{
cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_TEQ);
}
void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stw_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stw_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stl_data_ra(CPUArchState *env, target_ulong ptr,
uint32_t val, uintptr_t retaddr)
{
cpu_stl_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stq_data_ra(CPUArchState *env, target_ulong ptr,
uint64_t val, uintptr_t retaddr)
{
cpu_stq_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
}
void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stb_data_ra(env, ptr, val, 0);
}
void cpu_stw_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stw_data_ra(env, ptr, val, 0);
}
void cpu_stl_data(CPUArchState *env, target_ulong ptr, uint32_t val)
{
cpu_stl_data_ra(env, ptr, val, 0);
}
void cpu_stq_data(CPUArchState *env, target_ulong ptr, uint64_t val)
{
cpu_stq_data_ra(env, ptr, val, 0);
}
/* First set of helpers allows passing in of OI and RETADDR. This makes
them callable from other helpers. */
@ -1912,98 +2133,50 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
/* Code access functions. */
static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
return full_ldub_cmmu(env, addr, oi, retaddr);
TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
return full_ldub_code(env, addr, oi, 0);
}
int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
full_le_lduw_cmmu);
TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
return full_lduw_code(env, addr, oi, 0);
}
uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_cmmu(env, addr, oi, retaddr);
return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
return full_ldl_code(env, addr, oi, 0);
}
static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
full_be_lduw_cmmu);
return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
return full_be_lduw_cmmu(env, addr, oi, retaddr);
}
int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
}
static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
full_le_ldul_cmmu);
}
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_le_ldul_cmmu(env, addr, oi, retaddr);
}
static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
full_be_ldul_cmmu);
}
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return full_be_ldul_cmmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
helper_le_ldq_cmmu);
}
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
helper_be_ldq_cmmu);
TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}


@ -21,7 +21,7 @@
#include "qemu/host-utils.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "tcg-gvec-desc.h"
#include "tcg/tcg-gvec-desc.h"
/* Virtually all hosts support 16-byte vectors. Those that don't can emulate


@ -30,6 +30,7 @@
#include "exec/tb-lookup.h"
#include "disas/disas.h"
#include "exec/log.h"
#include "tcg/tcg.h"
/* 32-bit helpers */


@ -25,7 +25,7 @@
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg/tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)


@ -20,12 +20,14 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace-root.h"
#include "trace/mem.h"
#undef EAX
#undef ECX
@ -734,6 +736,240 @@ int cpu_signal_handler(int host_signum, void *pinfo,
/* The softmmu versions of these helpers are in cputlb.c. */
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldub_p(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
{
int ret;
uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldsb_p(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
uint16_t meminfo = trace_mem_get_info(MO_TEUW, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = lduw_p(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr)
{
int ret;
uint16_t meminfo = trace_mem_get_info(MO_TESW, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldsw_p(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
uint16_t meminfo = trace_mem_get_info(MO_TEUL, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldl_p(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr)
{
uint64_t ret;
uint16_t meminfo = trace_mem_get_info(MO_TEQ, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldq_p(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
uint32_t ret;
set_helper_retaddr(retaddr);
ret = cpu_ldub_data(env, ptr);
clear_helper_retaddr();
return ret;
}
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
int ret;
set_helper_retaddr(retaddr);
ret = cpu_ldsb_data(env, ptr);
clear_helper_retaddr();
return ret;
}
uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
uint32_t ret;
set_helper_retaddr(retaddr);
ret = cpu_lduw_data(env, ptr);
clear_helper_retaddr();
return ret;
}
int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
int ret;
set_helper_retaddr(retaddr);
ret = cpu_ldsw_data(env, ptr);
clear_helper_retaddr();
return ret;
}
uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
uint32_t ret;
set_helper_retaddr(retaddr);
ret = cpu_ldl_data(env, ptr);
clear_helper_retaddr();
return ret;
}
uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
{
uint64_t ret;
set_helper_retaddr(retaddr);
ret = cpu_ldq_data(env, ptr);
clear_helper_retaddr();
return ret;
}
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stb_p(g2h(ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
uint16_t meminfo = trace_mem_get_info(MO_TEUW, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stw_p(g2h(ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
{
uint16_t meminfo = trace_mem_get_info(MO_TEUL, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stl_p(g2h(ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
{
uint16_t meminfo = trace_mem_get_info(MO_TEQ, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stq_p(g2h(ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr)
{
set_helper_retaddr(retaddr);
cpu_stb_data(env, ptr, val);
clear_helper_retaddr();
}
void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr)
{
set_helper_retaddr(retaddr);
cpu_stw_data(env, ptr, val);
clear_helper_retaddr();
}
void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr)
{
set_helper_retaddr(retaddr);
cpu_stl_data(env, ptr, val);
clear_helper_retaddr();
}
void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t retaddr)
{
set_helper_retaddr(retaddr);
cpu_stq_data(env, ptr, val);
clear_helper_retaddr();
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
set_helper_retaddr(1);
ret = ldub_p(g2h(ptr));
clear_helper_retaddr();
return ret;
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
set_helper_retaddr(1);
ret = lduw_p(g2h(ptr));
clear_helper_retaddr();
return ret;
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
uint32_t ret;
set_helper_retaddr(1);
ret = ldl_p(g2h(ptr));
clear_helper_retaddr();
return ret;
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
uint64_t ret;
set_helper_retaddr(1);
ret = ldq_p(g2h(ptr));
clear_helper_retaddr();
return ret;
}
/* Do not allow unaligned operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
int size, uintptr_t retaddr)


@ -33,7 +33,7 @@
#include "qemu/module.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "exec/log.h"

configure

@ -7437,7 +7437,6 @@ elif test "$ARCH" = "riscv32" || test "$ARCH" = "riscv64" ; then
else
QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
fi
QEMU_INCLUDES="-iquote \$(SRC_PATH)/tcg $QEMU_INCLUDES"
echo "TOOLS=$tools" >> $config_host_mak
echo "ROMS=$roms" >> $config_host_mak

cpus.c

@ -53,7 +53,7 @@
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qemu/guest-random.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"


@ -72,31 +72,34 @@ Regexes for git grep
- ``\<ldn_\([hbl]e\)?_p\>``
- ``\<stn_\([hbl]e\)?_p\>``
``cpu_{ld,st}_*``
~~~~~~~~~~~~~~~~~
``cpu_{ld,st}*_mmuidx_ra``
~~~~~~~~~~~~~~~~~~~~~~~~~~
These functions operate on a guest virtual address. Be aware
that these functions may cause a guest CPU exception to be
taken (e.g. for an alignment fault or MMU fault) which will
result in guest CPU state being updated and control longjumping
out of the function call. They should therefore only be used
in code that is implementing emulation of the target CPU.
These functions operate on a guest virtual address plus a context,
known as a "mmu index" or ``mmuidx``, which controls how that virtual
address is translated. The meanings of the indexes are target specific,
but specifying a particular index might be necessary if, for instance,
the helper requires an "always as non-privileged" access rather than
the default access for the current state of the guest CPU.
These functions may throw an exception (longjmp() back out
to the top level TCG loop). This means they must only be used
from helper functions where the translator has saved all
necessary CPU state before generating the helper function call.
It's usually better to use the ``_ra`` variants described below
from helper functions, but these functions are the right choice
for calls made from hooks like the CPU do_interrupt hook or
when you know for certain that the translator had to save all
the CPU state that ``cpu_restore_state()`` would restore anyway.
These functions may cause a guest CPU exception to be taken
(e.g. for an alignment fault or MMU fault) which will result in
guest CPU state being updated and control longjmp'ing out of the
function call. They should therefore only be used in code that is
implementing emulation of the guest CPU.
The ``retaddr`` parameter is used to control unwinding of the
guest CPU state in case of a guest CPU exception. This is passed
to ``cpu_restore_state()``. Therefore the value should either be 0,
to indicate that the guest CPU state is already synchronized, or
the result of ``GETPC()`` from the top level ``HELPER(foo)``
function, which is a return address into the generated code.
Function names follow the pattern:
load: ``cpu_ld{sign}{size}_{mmusuffix}(env, ptr)``
load: ``cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmuidx, retaddr)``
store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
store: ``cpu_st{size}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)``
``sign``
- (empty) : for 32 or 64 bit sizes
@ -109,56 +112,151 @@ store: ``cpu_st{size}_{mmusuffix}(env, ptr, val)``
- ``l`` : 32 bits
- ``q`` : 64 bits
``mmusuffix`` is one of the generic suffixes ``data`` or ``code``, or
(for softmmu configs) a target-specific MMU mode suffix as defined
in the target's ``cpu.h``.
Regexes for git grep:
- ``\<cpu_ld[us]\?[bwlq]_mmuidx_ra\>``
- ``\<cpu_st[bwlq]_mmuidx_ra\>``
Regexes for git grep
- ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+\>``
- ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+\>``
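For illustration only (not part of this patch), a helper that must use an
explicit, non-default MMU index could look like this; the helper name and
the use of ``MMU_USER_IDX`` as the unprivileged index are assumptions::

    uint32_t HELPER(ldl_as_user)(CPUArchState *env, target_ulong addr)
    {
        /* GETPC() is the return address into generated code, so a fault
         * taken inside the load can unwind the guest state correctly. */
        return cpu_ldl_mmuidx_ra(env, addr, MMU_USER_IDX, GETPC());
    }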
``cpu_{ld,st}*_data_ra``
~~~~~~~~~~~~~~~~~~~~~~~~
``cpu_{ld,st}_*_ra``
~~~~~~~~~~~~~~~~~~~~
These functions work like the ``cpu_{ld,st}_*`` functions except
that they also take a ``retaddr`` argument. This extra argument
allows for correct unwinding of any exception that is taken,
and should generally be the result of GETPC() called directly
from the top level HELPER(foo) function (i.e. the return address
in the generated code).
These functions work like the ``cpu_{ld,st}_mmuidx_ra`` functions
except that the ``mmuidx`` parameter is taken from the current mode
of the guest CPU, as determined by ``cpu_mmu_index(env, false)``.
These are generally the preferred way to do accesses by guest
virtual address from helper functions; see the documentation
of the non-``_ra`` variants for when those would be better.
Calling these functions with a ``retaddr`` argument of 0 is
equivalent to calling the non-``_ra`` version of the function.
virtual address from helper functions, unless the access should
be performed with a context other than the default.
Function names follow the pattern:
load: ``cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)``
load: ``cpu_ld{sign}{size}_data_ra(env, ptr, ra)``
store: ``cpu_st{sign}{size}_{mmusuffix}_ra(env, ptr, val, retaddr)``
store: ``cpu_st{size}_data_ra(env, ptr, val, ra)``
``sign``
- (empty) : for 32 or 64 bit sizes
- ``u`` : unsigned
- ``s`` : signed
``size``
- ``b`` : 8 bits
- ``w`` : 16 bits
- ``l`` : 32 bits
- ``q`` : 64 bits
Regexes for git grep:
- ``\<cpu_ld[us]\?[bwlq]_data_ra\>``
- ``\<cpu_st[bwlq]_data_ra\>``
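Another sketch (not from this patch): a helper storing a pair of 64-bit
values in the CPU's current data MMU mode; ``HELPER(store_pair)`` is a
hypothetical helper name::

    void HELPER(store_pair)(CPUArchState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi)
    {
        uintptr_t ra = GETPC();

        /* Both stores pick up cpu_mmu_index(env, false) internally. */
        cpu_stq_data_ra(env, addr, lo, ra);
        cpu_stq_data_ra(env, addr + 8, hi, ra);
    }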
``cpu_{ld,st}*_data``
~~~~~~~~~~~~~~~~~~~~~
These functions work like the ``cpu_{ld,st}_data_ra`` functions
except that the ``retaddr`` parameter is 0, and thus does not
unwind guest CPU state.
This means they must only be used from helper functions where the
translator has saved all necessary CPU state. These functions are
the right choice for calls made from hooks like the CPU ``do_interrupt``
hook or when you know for certain that the translator had to save all
the CPU state anyway.
Function names follow the pattern:
load: ``cpu_ld{sign}{size}_data(env, ptr)``
store: ``cpu_st{size}_data(env, ptr, val)``
``sign``
- (empty) : for 32 or 64 bit sizes
- ``u`` : unsigned
- ``s`` : signed
``size``
- ``b`` : 8 bits
- ``w`` : 16 bits
- ``l`` : 32 bits
- ``q`` : 64 bits
Regexes for git grep
- ``\<cpu_ld[us]\?[bwlq]_[a-zA-Z0-9]\+_ra\>``
- ``\<cpu_st[bwlq]_[a-zA-Z0-9]\+_ra\>``
- ``\<cpu_ld[us]\?[bwlq]_data\>``
- ``\<cpu_st[bwlq]_data\>``
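A sketch of the no-``retaddr`` flavour (hypothetical hook code, not part of
this patch): fetching a vector entry from a ``do_interrupt``-style hook,
where the guest state is already synchronized::

    static uint32_t get_vector_entry(CPUArchState *env,
                                     target_ulong vector_base, int intno)
    {
        /* The implicit retaddr of 0 is fine here: no unwinding is needed
         * because this is not called from generated code. */
        return cpu_ldl_data(env, vector_base + intno * 4);
    }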
``helper_*_{ld,st}*mmu``
~~~~~~~~~~~~~~~~~~~~~~~~
``cpu_ld*_code``
~~~~~~~~~~~~~~~~
These functions perform a read for instruction execution. The ``mmuidx``
parameter is taken from the current mode of the guest CPU, as determined
by ``cpu_mmu_index(env, true)``. The ``retaddr`` parameter is 0, and
thus does not unwind guest CPU state, because CPU state is always
synchronized while translating instructions. Any guest CPU exception
that is raised will indicate an instruction execution fault rather than
a data read fault.
In general these functions should not be used directly during translation;
use the ``translator_ld*`` wrappers described below, which also take care
of plugin tracing.
Function names follow the pattern:
load: ``cpu_ld{sign}{size}_code(env, ptr)``
``sign``
- (empty) : for 32 or 64 bit sizes
- ``u`` : unsigned
- ``s`` : signed
``size``
- ``b`` : 8 bits
- ``w`` : 16 bits
- ``l`` : 32 bits
- ``q`` : 64 bits
Regexes for git grep:
- ``\<cpu_ld[us]\?[bwlq]_code\>``
``translator_ld*``
~~~~~~~~~~~~~~~~~~
These functions are wrappers for ``cpu_ld*_code`` which also perform
any actions required by tracing plugins. They are only to be called
from within the translator callback ``translate_insn``.
There is a set of functions ending in ``_swap`` which, if the parameter
is true, returns the value in the endianness that is the reverse of
the guest native endianness, as determined by ``TARGET_WORDS_BIGENDIAN``.
Function names follow the pattern:
load: ``translator_ld{sign}{size}(env, ptr)``
swap: ``translator_ld{sign}{size}_swap(env, ptr, swap)``
``sign``
- (empty) : for 32 or 64 bit sizes
- ``u`` : unsigned
- ``s`` : signed
``size``
- ``b`` : 8 bits
- ``w`` : 16 bits
- ``l`` : 32 bits
- ``q`` : 64 bits
Regexes for git grep
- ``\<translator_ld[us]\?[bwlq]\(_swap\)\?\>``
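A usage sketch (not part of this patch): a ``translate_insn`` implementation
fetches the next instruction word through these wrappers rather than calling
``cpu_ld*_code`` directly; ``next_insn_word`` is a hypothetical helper::

    static uint32_t next_insn_word(CPUArchState *env, DisasContextBase *db,
                                   bool swap)
    {
        /* Reads via the code MMU index and feeds any tracing plugins;
         * 'swap' reverses the guest-native endianness of the result. */
        uint32_t insn = translator_ldl_swap(env, db->pc_next, swap);
        db->pc_next += 4;
        return insn;
    }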
``helper_*_{ld,st}*_mmu``
~~~~~~~~~~~~~~~~~~~~~~~~~
These functions are intended primarily to be called by the code
generated by the TCG backend. They may also be called by target
CPU helper function code. Like the ``cpu_{ld,st}_*_ra`` functions
they perform accesses by guest virtual address; the difference is
that these functions allow you to specify an ``opindex`` parameter
which encodes (among other things) the mmu index to use for the
access. This is necessary if your helper needs to make an access
via a specific mmu index (for instance, an "always as non-privileged"
access) rather than using the default mmu index for the current state
of the guest CPU.
CPU helper function code. Like the ``cpu_{ld,st}_mmuidx_ra`` functions
they perform accesses by guest virtual address, with a given ``mmuidx``.
The ``opindex`` parameter should be created by calling ``make_memop_idx()``.
These functions specify an ``opindex`` parameter which encodes
(among other things) the mmu index to use for the access. This parameter
should be created by calling ``make_memop_idx()``.
The ``retaddr`` parameter should be the result of GETPC() called directly
from the top level HELPER(foo) function (or 0 if no guest CPU state
@ -166,13 +264,12 @@ unwinding is required).
**TODO** The names of these functions are a bit odd for historical
reasons because they were originally expected to be called only from
within generated code. We should rename them to bring them
more in line with the other memory access functions.
within generated code. We should rename them to bring them more in
line with the other memory access functions. The explicit endianness
is the only feature they have beyond ``*_mmuidx_ra``.
load: ``helper_{endian}_ld{sign}{size}_mmu(env, addr, opindex, retaddr)``
load (code): ``helper_{endian}_ld{sign}{size}_cmmu(env, addr, opindex, retaddr)``
store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``
``sign``
@ -192,7 +289,7 @@ store: ``helper_{endian}_st{size}_mmu(env, addr, val, opindex, retaddr)``
- ``ret`` : target endianness
Regexes for git grep
- ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_c\?mmu\>``
- ``\<helper_\(le\|be\|ret\)_ld[us]\?[bwlq]_mmu\>``
- ``\<helper_\(le\|be\|ret\)_st[bwlq]_mmu\>``
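An illustrative call sequence (assumed context: a top-level ``HELPER(foo)``
that has already chosen an MMU index and captured ``GETPC()``), showing how
the ``opindex`` is built; ``load8_le`` is a hypothetical wrapper::

    static uint64_t load8_le(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
    {
        /* Encode size, endianness and the mmu index into the opindex. */
        TCGMemOpIdx oi = make_memop_idx(MO_LEQ, mmu_idx);

        /* retaddr is GETPC() from the enclosing HELPER(foo), or 0 if no
         * guest-state unwinding is required. */
        return helper_le_ldq_mmu(env, addr, oi, retaddr);
    }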
``address_space_*``

exec.c

@ -25,7 +25,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)


@ -25,9 +25,13 @@
*
* The syntax for the accessors is:
*
* load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
* load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
* cpu_ld{sign}{size}_{mmusuffix}_ra(env, ptr, retaddr)
* cpu_ld{sign}{size}_mmuidx_ra(env, ptr, mmu_idx, retaddr)
*
* store: cpu_st{sign}{size}_{mmusuffix}(env, ptr, val)
* store: cpu_st{size}_{mmusuffix}(env, ptr, val)
* cpu_st{size}_{mmusuffix}_ra(env, ptr, val, retaddr)
* cpu_st{size}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr)
*
* sign is:
* (empty): for 32 and 64 bit sizes
@ -40,9 +44,10 @@
* l: 32 bits
* q: 64 bits
*
* mmusuffix is one of the generic suffixes "data" or "code", or
* (for softmmu configs) a target-specific MMU mode suffix as defined
* in target cpu.h.
* mmusuffix is one of the generic suffixes "data" or "code", or "mmuidx".
* The "mmuidx" suffix carries an extra mmu_idx argument that specifies
* the index to use; the "data" and "code" suffixes take the index from
* cpu_mmu_index().
*/
#ifndef CPU_LDST_H
#define CPU_LDST_H
@ -89,6 +94,34 @@ typedef target_ulong abi_ptr;
#define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
#endif
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_lduw_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldl_data(CPUArchState *env, abi_ptr ptr);
uint64_t cpu_ldq_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr);
int cpu_ldsw_data(CPUArchState *env, abi_ptr ptr);
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint32_t cpu_lduw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint32_t cpu_ldl_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
uint64_t cpu_ldq_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
int cpu_ldsw_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr);
void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stw_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stl_data(CPUArchState *env, abi_ptr ptr, uint32_t val);
void cpu_stq_data(CPUArchState *env, abi_ptr ptr, uint64_t val);
void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr);
void cpu_stw_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr);
void cpu_stl_data_ra(CPUArchState *env, abi_ptr ptr,
uint32_t val, uintptr_t retaddr);
void cpu_stq_data_ra(CPUArchState *env, abi_ptr ptr,
uint64_t val, uintptr_t retaddr);
#if defined(CONFIG_USER_ONLY)
extern __thread uintptr_t helper_retaddr;
@ -113,47 +146,75 @@ static inline void clear_helper_retaddr(void)
helper_retaddr = 0;
}
/* In user-only mode we provide only the _code and _data accessors. */
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
/*
* Code access is deprecated in favour of translator_ld* functions
* (see translator.h). However there are still users that need to
* converted so for now these stay.
* Provide the same *_mmuidx_ra interface as for softmmu.
* The mmu_idx argument is ignored.
*/
#define MEMSUFFIX _code
#define CODE_ACCESS
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"
static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldub_data_ra(env, addr, ra);
}
#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"
static inline uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_lduw_data_ra(env, addr, ra);
}
#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#undef CODE_ACCESS
static inline uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldl_data_ra(env, addr, ra);
}
static inline uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldq_data_ra(env, addr, ra);
}
static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldsb_data_ra(env, addr, ra);
}
static inline int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
return cpu_ldsw_data_ra(env, addr, ra);
}
static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx, uintptr_t ra)
{
cpu_stb_data_ra(env, addr, val, ra);
}
static inline void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx, uintptr_t ra)
{
cpu_stw_data_ra(env, addr, val, ra);
}
static inline void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint32_t val, int mmu_idx, uintptr_t ra)
{
cpu_stl_data_ra(env, addr, val, ra);
}
static inline void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uint64_t val, int mmu_idx, uintptr_t ra)
{
cpu_stq_data_ra(env, addr, val, ra);
}
#else
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
/* Needed for TCG_OVERSIZED_GUEST */
#include "tcg/tcg.h"
static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
{
@ -185,281 +246,46 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
}
#ifdef MMU_MODE0_SUFFIX
#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
#if (NB_MMU_MODES >= 2) && defined(MMU_MODE1_SUFFIX)
#define CPU_MMU_INDEX 1
#define MEMSUFFIX MMU_MODE1_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
#if (NB_MMU_MODES >= 3) && defined(MMU_MODE2_SUFFIX)
#define CPU_MMU_INDEX 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */
#if (NB_MMU_MODES >= 4) && defined(MMU_MODE3_SUFFIX)
#define CPU_MMU_INDEX 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */
#if (NB_MMU_MODES >= 5) && defined(MMU_MODE4_SUFFIX)
#define CPU_MMU_INDEX 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */
#if (NB_MMU_MODES >= 6) && defined(MMU_MODE5_SUFFIX)
#define CPU_MMU_INDEX 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */
#if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
#define CPU_MMU_INDEX 6
#define MEMSUFFIX MMU_MODE6_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 7) */
#if (NB_MMU_MODES >= 8) && defined(MMU_MODE7_SUFFIX)
#define CPU_MMU_INDEX 7
#define MEMSUFFIX MMU_MODE7_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 8) */
#if (NB_MMU_MODES >= 9) && defined(MMU_MODE8_SUFFIX)
#define CPU_MMU_INDEX 8
#define MEMSUFFIX MMU_MODE8_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 9) */
#if (NB_MMU_MODES >= 10) && defined(MMU_MODE9_SUFFIX)
#define CPU_MMU_INDEX 9
#define MEMSUFFIX MMU_MODE9_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 10) */
#if (NB_MMU_MODES >= 11) && defined(MMU_MODE10_SUFFIX)
#define CPU_MMU_INDEX 10
#define MEMSUFFIX MMU_MODE10_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 11) */
#if (NB_MMU_MODES >= 12) && defined(MMU_MODE11_SUFFIX)
#define CPU_MMU_INDEX 11
#define MEMSUFFIX MMU_MODE11_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 12) */
#if (NB_MMU_MODES > 12)
#error "NB_MMU_MODES > 12 is not supported for now"
#endif /* (NB_MMU_MODES > 12) */
/* these access are slower, they must be as rare as possible */
#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
/*
* Code access is deprecated in favour of translator_ld* functions
* (see translator.h). However there are still users that need to
* converted so for now these stay.
*/
#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#undef SOFTMMU_CODE_ACCESS
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_lduw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint32_t cpu_ldl_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
uint64_t cpu_ldq_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
int cpu_ldsw_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra);
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stw_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stl_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void cpu_stq_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
#endif /* defined(CONFIG_USER_ONLY) */
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
{
return (int8_t)cpu_ldub_code(env, addr);
}
static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
{
return (int16_t)cpu_lduw_code(env, addr);
}
/**
* tlb_vaddr_to_host:
* @env: CPUArchState


@ -1,211 +0,0 @@
/*
* Software MMU support
*
* Generate inline load/store functions for one MMU mode and data
* size.
*
* Generate a store function as well as signed and unsigned loads.
*
* Not used directly but included from cpu_ldst.h.
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#if !defined(SOFTMMU_CODE_ACCESS)
#include "trace-root.h"
#endif
#include "qemu/plugin.h"
#include "trace/mem.h"
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define SHIFT 3
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#define SHIFT 2
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#define SHIFT 1
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#define SHIFT 0
#else
#error unsupported data size
#endif
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
#define URETSUFFIX USUFFIX
#define SRETSUFFIX glue(s, SUFFIX)
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
#define URETSUFFIX USUFFIX
#define SRETSUFFIX glue(s, SUFFIX)
#endif
/* generic load/store macros */
static inline RES_TYPE
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong ptr,
uintptr_t retaddr)
{
CPUTLBEntry *entry;
RES_TYPE res;
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
#if !defined(SOFTMMU_CODE_ACCESS)
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
entry = tlb_entry(env, mmu_idx, addr);
if (unlikely(entry->ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
oi, retaddr);
} else {
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
}
#ifndef SOFTMMU_CODE_ACCESS
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
return res;
}
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
}
#if DATA_SIZE <= 2
static inline int
glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong ptr,
uintptr_t retaddr)
{
CPUTLBEntry *entry;
int res;
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
#if !defined(SOFTMMU_CODE_ACCESS)
uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
entry = tlb_entry(env, mmu_idx, addr);
if (unlikely(entry->ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
MMUSUFFIX)(env, addr, oi, retaddr);
} else {
uintptr_t hostaddr = addr + entry->addend;
res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
}
#ifndef SOFTMMU_CODE_ACCESS
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
return res;
}
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
}
#endif
#ifndef SOFTMMU_CODE_ACCESS
/* generic store macro */
static inline void
glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
target_ulong ptr,
RES_TYPE v, uintptr_t retaddr)
{
CPUTLBEntry *entry;
target_ulong addr;
int mmu_idx = CPU_MMU_INDEX;
TCGMemOpIdx oi;
#if !defined(SOFTMMU_CODE_ACCESS)
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
#endif
addr = ptr;
entry = tlb_entry(env, mmu_idx, addr);
if (unlikely(tlb_addr_write(entry) !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
oi = make_memop_idx(SHIFT, mmu_idx);
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
retaddr);
} else {
uintptr_t hostaddr = addr + entry->addend;
glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
}
#ifndef SOFTMMU_CODE_ACCESS
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
}
static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
RES_TYPE v)
{
glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
}
#endif /* !SOFTMMU_CODE_ACCESS */
#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef MMUSUFFIX
#undef ADDR_READ
#undef URETSUFFIX
#undef SRETSUFFIX
#undef SHIFT


@ -1,159 +0,0 @@
/*
* User-only accessor function support
*
* Generate inline load/store functions for one data size.
*
* Generate a store function as well as signed and unsigned loads.
*
* Not used directly but included from cpu_ldst.h.
*
* Copyright (c) 2015 Linaro Limited
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#if !defined(CODE_ACCESS)
#include "trace-root.h"
#endif
#include "trace/mem.h"
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define SHIFT 3
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#define SHIFT 2
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#define SHIFT 1
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#define SHIFT 0
#else
#error unsupported data size
#endif
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif
static inline RES_TYPE
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
RES_TYPE ret;
#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
#else
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false,
MMU_USER_IDX);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
#endif
return ret;
}
#ifndef CODE_ACCESS
static inline RES_TYPE
glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
abi_ptr ptr,
uintptr_t retaddr)
{
RES_TYPE ret;
set_helper_retaddr(retaddr);
ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
clear_helper_retaddr();
return ret;
}
#endif
#if DATA_SIZE <= 2
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
int ret;
#ifdef CODE_ACCESS
set_helper_retaddr(1);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
clear_helper_retaddr();
#else
uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false,
MMU_USER_IDX);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
#endif
return ret;
}
#ifndef CODE_ACCESS
static inline int
glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
abi_ptr ptr,
uintptr_t retaddr)
{
int ret;
set_helper_retaddr(retaddr);
ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
clear_helper_retaddr();
return ret;
}
#endif /* CODE_ACCESS */
#endif /* DATA_SIZE <= 2 */
#ifndef CODE_ACCESS
static inline void
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
RES_TYPE v)
{
uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true,
MMU_USER_IDX);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
static inline void
glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
abi_ptr ptr,
RES_TYPE v,
uintptr_t retaddr)
{
set_helper_retaddr(retaddr);
glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
clear_helper_retaddr();
}
#endif
#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef SHIFT


@ -148,41 +148,19 @@ void translator_loop_temp_check(DisasContextBase *db);
/*
* Translator Load Functions
*
* These are intended to replace the old cpu_ld*_code functions and
* are mandatory for front-ends that have been migrated to the common
* translator_loop. These functions are only intended to be called
* from the translation stage and should not be called from helper
* functions. Those functions should be converted to encode the
* relevant information at translation time.
* These are intended to replace the direct usage of the cpu_ld*_code
* functions and are mandatory for front-ends that have been migrated
* to the common translator_loop. These functions are only intended
* to be called from the translation stage and should not be called
* from helper functions. Those functions should be converted to encode
* the relevant information at translation time.
*/
#ifdef CONFIG_USER_ONLY
#define DO_LOAD(type, name, shift) \
do { \
set_helper_retaddr(1); \
ret = name ## _p(g2h(pc)); \
clear_helper_retaddr(); \
} while (0)
#else
#define DO_LOAD(type, name, shift) \
do { \
int mmu_idx = cpu_mmu_index(env, true); \
TCGMemOpIdx oi = make_memop_idx(shift, mmu_idx); \
ret = helper_ret_ ## name ## _cmmu(env, pc, oi, 0); \
} while (0)
#endif
#define GEN_TRANSLATOR_LD(fullname, name, type, shift, swap_fn) \
#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
static inline type \
fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \
{ \
type ret; \
DO_LOAD(type, name, shift); \
\
type ret = load_fn(env, pc); \
if (do_swap) { \
ret = swap_fn(ret); \
} \
@ -195,11 +173,11 @@ void translator_loop_temp_check(DisasContextBase *db);
return fullname ## _swap(env, pc, false); \
}
GEN_TRANSLATOR_LD(translator_ldub, ldub, uint8_t, 0, /* no swap */ )
GEN_TRANSLATOR_LD(translator_ldsw, ldsw, int16_t, 1, bswap16)
GEN_TRANSLATOR_LD(translator_lduw, lduw, uint16_t, 1, bswap16)
GEN_TRANSLATOR_LD(translator_ldl, ldl, uint32_t, 2, bswap32)
GEN_TRANSLATOR_LD(translator_ldq, ldq, uint64_t, 3, bswap64)
GEN_TRANSLATOR_LD(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */)
GEN_TRANSLATOR_LD(translator_ldsw, int16_t, cpu_ldsw_code, bswap16)
GEN_TRANSLATOR_LD(translator_lduw, uint16_t, cpu_lduw_code, bswap16)
GEN_TRANSLATOR_LD(translator_ldl, uint32_t, cpu_ldl_code, bswap32)
GEN_TRANSLATOR_LD(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
#undef GEN_TRANSLATOR_LD
#endif /* EXEC__TRANSLATOR_H */
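As a rough illustration of the translator load API described in the comment above (not part of this commit), a front-end converted to the common translator_loop might fetch instruction words during decode as sketched below; DisasContextBase comes from exec/translator.h, while decode_insn16() is an invented placeholder:

/*
 * Hypothetical sketch only: fetch an opcode at translation time via the
 * generated translator_lduw()/translator_lduw_swap() helpers.
 * decode_insn16() is not a real QEMU function.
 */
static void example_decode_step(CPUArchState *env, DisasContextBase *dcbase)
{
    /* Load the next 16-bit instruction word in target order... */
    uint16_t insn = translator_lduw(env, dcbase->pc_next);

    /* ...or explicitly byte-swapped, e.g. for a cross-endian setup. */
    uint16_t swapped = translator_lduw_swap(env, dcbase->pc_next, true);

    dcbase->pc_next += 2;
    decode_insn16(dcbase, insn);
    (void)swapped;
}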


@ -25,7 +25,7 @@
#ifndef TCG_TCG_OP_H
#define TCG_TCG_OP_H
#include "tcg.h"
#include "tcg/tcg.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"


@ -31,7 +31,7 @@
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg/tcg-mo.h"
#include "tcg-target.h"
#include "qemu/int128.h"
@ -211,7 +211,7 @@ typedef uint64_t TCGRegSet;
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#include "tcg/tcg-opc.h"
#undef DEF
NB_OPS,
} TCGOpcode;
@ -1290,27 +1290,6 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
TCGMemOpIdx oi, uintptr_t retaddr);
uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
@ -1322,10 +1301,6 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_stw_mmu helper_be_stw_mmu
# define helper_ret_stl_mmu helper_be_stl_mmu
# define helper_ret_stq_mmu helper_be_stq_mmu
# define helper_ret_lduw_cmmu helper_be_lduw_cmmu
# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu
# define helper_ret_ldl_cmmu helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu helper_le_ldsw_mmu
# define helper_ret_lduw_mmu helper_le_lduw_mmu
@ -1336,10 +1311,6 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
# define helper_ret_stw_mmu helper_le_stw_mmu
# define helper_ret_stl_mmu helper_le_stl_mmu
# define helper_ret_stq_mmu helper_le_stq_mmu
# define helper_ret_lduw_cmmu helper_le_lduw_cmmu
# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu
# define helper_ret_ldl_cmmu helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,


@ -10,6 +10,8 @@
#ifndef _SYSCALL_TRACE_H_
#define _SYSCALL_TRACE_H_
#include "trace-root.h"
/*
* These helpers just provide a common place for the various
* subsystems that want to track syscalls to put their hooks in. We


@ -37,7 +37,7 @@
#include "qemu/plugin.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "qemu/guest-random.h"


@ -115,6 +115,7 @@
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */


@ -46,6 +46,7 @@
#include "qemu/plugin-memory.h"
#include "hw/boards.h"
#endif
#include "trace/mem.h"
/* Uninstall and Reset handlers */


@ -193,8 +193,6 @@ enum {
PALcode cheats and uses the KSEG mapping for its code+data rather than
physical addresses. */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 1
#define MMU_PHYS_IDX 2


@ -23,7 +23,7 @@
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"


@ -31,7 +31,7 @@
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include <zlib.h> /* For crc32 */


@ -25,6 +25,7 @@
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
/* Note that vector data is stored in host-endian 64-bit chunks,


@ -20,8 +20,8 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"


@ -20,9 +20,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg-gvec-desc.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"


@ -24,8 +24,8 @@
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"


@ -253,8 +253,6 @@ enum {
#define cpu_signal_handler cpu_cris_signal_handler
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
{


@ -27,7 +27,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "mmu.h"
#include "exec/cpu_ldst.h"


@ -22,7 +22,7 @@
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"


@ -1955,9 +1955,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define cpu_list x86_cpu_list
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _ksmap
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
#define MMU_KSMAP_IDX 0
#define MMU_USER_IDX 1
#define MMU_KNOSMAP_IDX 2


@ -24,7 +24,7 @@
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg.h"
#include "tcg/tcg.h"
void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
{


@ -37,37 +37,37 @@
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"
/*
* TODO: Convert callers to compute cpu_mmu_index_kernel once
* and use *_mmuidx_ra directly.
*/
#define cpu_ldub_kernel_ra(e, p, r) \
cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_lduw_kernel_ra(e, p, r) \
cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel_ra(e, p, r) \
cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldq_kernel_ra(e, p, r) \
cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"
#define cpu_stb_kernel_ra(e, p, v, r) \
cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stw_kernel_ra(e, p, v, r) \
cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stl_kernel_ra(e, p, v, r) \
cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stq_kernel_ra(e, p, v, r) \
cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"
#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)
#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"
#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)
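To make the TODO above concrete, here is a hedged sketch (not part of this commit) of what a converted caller could look like: the MMU index is computed once and the new *_mmuidx_ra accessors are used directly; the descriptor-reading helper itself is invented for illustration:

/*
 * Hypothetical example only: several kernel-mode loads sharing a single
 * cpu_mmu_index_kernel() lookup instead of going through the macros above.
 */
static uint64_t example_read_descriptor(CPUX86State *env, target_ulong ptr,
                                        uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index_kernel(env);    /* computed once */
    uint32_t e1 = cpu_ldl_mmuidx_ra(env, ptr, mmu_idx, ra);
    uint32_t e2 = cpu_ldl_mmuidx_ra(env, ptr + 4, mmu_idx, ra);

    return ((uint64_t)e2 << 32) | e1;
}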
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,


@ -22,7 +22,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"


@ -23,7 +23,7 @@
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "qemu/qemu-print.h"
#include "exec/cpu_ldst.h"


@ -519,8 +519,6 @@ enum {
#define cpu_list m68k_cpu_list
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)


@ -42,8 +42,8 @@ static void cf_rte(CPUM68KState *env)
uint32_t fmt;
sp = env->aregs[7];
fmt = cpu_ldl_kernel(env, sp);
env->pc = cpu_ldl_kernel(env, sp + 4);
fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
sp |= (fmt >> 28) & 3;
env->aregs[7] = sp + 8;
@ -58,13 +58,13 @@ static void m68k_rte(CPUM68KState *env)
sp = env->aregs[7];
throwaway:
sr = cpu_lduw_kernel(env, sp);
sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
sp += 2;
env->pc = cpu_ldl_kernel(env, sp);
env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
sp += 4;
if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
/* all except 68000 */
fmt = cpu_lduw_kernel(env, sp);
fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
sp += 2;
switch (fmt >> 12) {
case 0:
@ -260,12 +260,12 @@ static void cf_interrupt_all(CPUM68KState *env, int is_hw)
/* ??? This could cause MMU faults. */
sp &= ~3;
sp -= 4;
cpu_stl_kernel(env, sp, retaddr);
cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
sp -= 4;
cpu_stl_kernel(env, sp, fmt);
cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
env->aregs[7] = sp;
/* Jump to vector. */
env->pc = cpu_ldl_kernel(env, env->vbr + vector);
env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
}
static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
@ -278,23 +278,24 @@ static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
switch (format) {
case 4:
*sp -= 4;
cpu_stl_kernel(env, *sp, env->pc);
cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0);
*sp -= 4;
cpu_stl_kernel(env, *sp, addr);
cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
break;
case 3:
case 2:
*sp -= 4;
cpu_stl_kernel(env, *sp, addr);
cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
break;
}
*sp -= 2;
cpu_stw_kernel(env, *sp, (format << 12) + (cs->exception_index << 2));
cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2),
MMU_KERNEL_IDX, 0);
}
*sp -= 4;
cpu_stl_kernel(env, *sp, retaddr);
cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0);
*sp -= 2;
cpu_stw_kernel(env, *sp, sr);
cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0);
}
static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
@ -353,36 +354,52 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
cpu_abort(cs, "DOUBLE MMU FAULT\n");
}
env->mmu.fault = true;
/* push data 3 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* push data 3 */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* push data 2 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* push data 2 */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* push data 1 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* push data 1 */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 1 / push data 0 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 1 / push data 0 */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 1 address */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 1 address */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 2 data */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 2 data */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 2 address */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 2 address */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 3 data */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 3 data */
cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 3 address */
sp -= 4;
cpu_stl_kernel(env, sp, env->mmu.ar); /* write back 3 address */
cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
/* fault address */
sp -= 4;
cpu_stl_kernel(env, sp, env->mmu.ar); /* fault address */
cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
/* write back 1 status */
sp -= 2;
cpu_stw_kernel(env, sp, 0); /* write back 1 status */
cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 2 status */
sp -= 2;
cpu_stw_kernel(env, sp, 0); /* write back 2 status */
cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* write back 3 status */
sp -= 2;
cpu_stw_kernel(env, sp, 0); /* write back 3 status */
cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
/* special status word */
sp -= 2;
cpu_stw_kernel(env, sp, env->mmu.ssw); /* special status word */
cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0);
/* effective address */
sp -= 4;
cpu_stl_kernel(env, sp, env->mmu.ar); /* effective address */
cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
env->mmu.fault = false;
if (qemu_loglevel_mask(CPU_LOG_INT)) {
@ -414,7 +431,7 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
env->aregs[7] = sp;
/* Jump to vector. */
env->pc = cpu_ldl_kernel(env, env->vbr + vector);
env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
}
static void do_interrupt_all(CPUM68KState *env, int is_hw)


@ -22,7 +22,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "exec/cpu_ldst.h"


@ -328,9 +328,6 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
#define cpu_signal_handler cpu_mb_signal_handler
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _nommu
#define MMU_MODE1_SUFFIX _kernel
#define MMU_MODE2_SUFFIX _user
#define MMU_NOMMU_IDX 0
#define MMU_KERNEL_IDX 1
#define MMU_USER_IDX 2


@ -22,7 +22,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"


@ -1147,10 +1147,6 @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
* MMU modes definitions. We carefully match the indices with our
* hflags layout.
*/
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _super
#define MMU_MODE2_SUFFIX _user
#define MMU_MODE3_SUFFIX _error
#define MMU_USER_IDX 2
static inline int hflags_mmu_index(uint32_t hflags)


@ -52,69 +52,6 @@ static void raise_exception(CPUMIPSState *env, uint32_t exception)
do_raise_exception(env, exception, 0);
}
#if defined(CONFIG_USER_ONLY)
#define HELPER_LD(name, insn, type) \
static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
int mem_idx, uintptr_t retaddr) \
{ \
return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
}
#else
#define HELPER_LD(name, insn, type) \
static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
int mem_idx, uintptr_t retaddr) \
{ \
switch (mem_idx) { \
case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
default: \
case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
} \
}
#endif
HELPER_LD(lw, ldl, int32_t)
#if defined(TARGET_MIPS64)
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
#if defined(CONFIG_USER_ONLY)
#define HELPER_ST(name, insn, type) \
static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
type val, int mem_idx, uintptr_t retaddr) \
{ \
cpu_##insn##_data_ra(env, addr, val, retaddr); \
}
#else
#define HELPER_ST(name, insn, type) \
static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
type val, int mem_idx, uintptr_t retaddr) \
{ \
switch (mem_idx) { \
case 0: \
cpu_##insn##_kernel_ra(env, addr, val, retaddr); \
break; \
case 1: \
cpu_##insn##_super_ra(env, addr, val, retaddr); \
break; \
default: \
case 2: \
cpu_##insn##_user_ra(env, addr, val, retaddr); \
break; \
case 3: \
cpu_##insn##_error_ra(env, addr, val, retaddr); \
break; \
} \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#if defined(TARGET_MIPS64)
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
/* 64 bits arithmetic for 32 bits hosts */
static inline uint64_t get_HILO(CPUMIPSState *env)
{
@ -379,12 +316,12 @@ target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
} \
env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC()); \
env->lladdr = arg; \
env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
env->llval = cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC()); \
return env->llval; \
}
HELPER_LD_ATOMIC(ll, lw, 0x3)
HELPER_LD_ATOMIC(ll, ldl, 0x3)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld, 0x7)
HELPER_LD_ATOMIC(lld, ldq, 0x7)
#endif
#undef HELPER_LD_ATOMIC
#endif
@ -400,42 +337,42 @@ HELPER_LD_ATOMIC(lld, ld, 0x7)
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
if (GET_LMASK(arg2) <= 2) {
do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
mem_idx, GETPC());
}
if (GET_LMASK(arg2) <= 1) {
do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
mem_idx, GETPC());
}
if (GET_LMASK(arg2) == 0) {
do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
mem_idx, GETPC());
}
}
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
if (GET_LMASK(arg2) >= 1) {
do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
mem_idx, GETPC());
}
if (GET_LMASK(arg2) >= 2) {
do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
mem_idx, GETPC());
}
if (GET_LMASK(arg2) == 3) {
do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
mem_idx, GETPC());
}
}
@ -453,82 +390,82 @@ void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
if (GET_LMASK64(arg2) <= 6) {
do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) <= 5) {
do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) <= 4) {
do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) <= 3) {
do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) <= 2) {
do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) <= 1) {
do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) <= 0) {
do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
mem_idx, GETPC());
}
}
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
int mem_idx)
{
do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
if (GET_LMASK64(arg2) >= 1) {
do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) >= 2) {
do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) >= 3) {
do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) >= 4) {
do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) >= 5) {
do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) >= 6) {
do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
mem_idx, GETPC());
}
if (GET_LMASK64(arg2) == 7) {
do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
GETPC());
cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
mem_idx, GETPC());
}
}
#endif /* TARGET_MIPS64 */
@ -546,14 +483,14 @@ void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
for (i = 0; i < base_reglist; i++) {
env->active_tc.gpr[multiple_regs[i]] =
(target_long)do_lw(env, addr, mem_idx, GETPC());
(target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
addr += 4;
}
}
if (do_r31) {
env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
GETPC());
env->active_tc.gpr[31] =
(target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
}
}
@ -567,14 +504,14 @@ void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
target_ulong i;
for (i = 0; i < base_reglist; i++) {
do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
GETPC());
cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
mem_idx, GETPC());
addr += 4;
}
}
if (do_r31) {
do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
}
}
@ -589,14 +526,15 @@ void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
target_ulong i;
for (i = 0; i < base_reglist; i++) {
env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
GETPC());
env->active_tc.gpr[multiple_regs[i]] =
cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
addr += 8;
}
}
if (do_r31) {
env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
env->active_tc.gpr[31] =
cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
}
}
@ -610,14 +548,14 @@ void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
target_ulong i;
for (i = 0; i < base_reglist; i++) {
do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
GETPC());
cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
mem_idx, GETPC());
addr += 8;
}
}
if (do_r31) {
do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
}
}
#endif


@ -26,7 +26,7 @@
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "hw/mips/cpudevs.h"


@ -26,7 +26,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "qemu/qemu-print.h"


@ -217,8 +217,6 @@ void do_nios2_semihosting(CPUNios2State *env);
#define CPU_SAVE_VERSION 1
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_SUPERVISOR_IDX 0
#define MMU_USER_IDX 1


@ -23,7 +23,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"


@ -22,7 +22,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"


@ -951,8 +951,6 @@ struct ppc_radix_page_info {
* + real/paged mode combinations. The other two modes are for
* external PID load/store.
*/
#define MMU_MODE8_SUFFIX _epl
#define MMU_MODE9_SUFFIX _eps
#define PPC_TLB_EPID_LOAD 8
#define PPC_TLB_EPID_STORE 9


@ -25,7 +25,7 @@
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"
@ -177,14 +177,7 @@ static void dcbz_common(CPUPPCState *env, target_ulong addr,
} else {
/* Slow path */
for (i = 0; i < dcbz_size; i += 8) {
if (epid) {
#if !defined(CONFIG_USER_ONLY)
/* Does not make sense on USER_ONLY config */
cpu_stq_eps_ra(env, addr + i, 0, retaddr);
#endif
} else {
cpu_stq_data_ra(env, addr + i, 0, retaddr);
}
cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
}
}
}
@ -216,7 +209,7 @@ void helper_icbiep(CPUPPCState *env, target_ulong addr)
#if !defined(CONFIG_USER_ONLY)
/* See comments above */
addr &= ~(env->dcache_line_size - 1);
cpu_ldl_epl_ra(env, addr, GETPC());
cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}


@ -23,8 +23,8 @@
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/cpu_ldst.h"


@ -22,7 +22,7 @@
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "trace.h"
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)


@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "disas/disas.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"


@ -36,11 +36,6 @@
#define TARGET_INSN_START_EXTRA_WORDS 2
#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
#define MMU_MODE2_SUFFIX _home
#define MMU_MODE3_SUFFIX _real
#define MMU_USER_IDX 0
#define S390_MAX_CPUS 248


@ -27,6 +27,7 @@
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg/tcg.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
@ -2025,7 +2026,7 @@ uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;
for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
cpu_stq_real_ra(env, real_addr + i, 0, ra);
cpu_stq_mmuidx_ra(env, real_addr + i, 0, MMU_REAL_IDX, ra);
}
return 0;
@ -2259,11 +2260,11 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
for (i = 0; i < entries; i++) {
/* addresses are not wrapped in 24/31bit mode but table index is */
raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
entry = cpu_ldq_real_ra(env, raddr, ra);
entry = cpu_ldq_mmuidx_ra(env, raddr, MMU_REAL_IDX, ra);
if (!(entry & REGION_ENTRY_I)) {
/* we are allowed to not store if already invalid */
entry |= REGION_ENTRY_I;
cpu_stq_real_ra(env, raddr, entry, ra);
cpu_stq_mmuidx_ra(env, raddr, entry, MMU_REAL_IDX, ra);
}
}
}
@ -2290,9 +2291,9 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
pte_addr += VADDR_PAGE_TX(vaddr) * 8;
/* Mark the page table entry as invalid */
pte = cpu_ldq_real_ra(env, pte_addr, ra);
pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
pte |= PAGE_ENTRY_I;
cpu_stq_real_ra(env, pte_addr, pte, ra);
cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */


@ -33,8 +33,8 @@
#include "internal.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/host-utils.h"
#include "exec/cpu_ldst.h"


@ -254,8 +254,6 @@ void cpu_load_tlb(CPUSH4State * env);
#define cpu_list sh4_cpu_list
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
{


@ -23,7 +23,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"


@ -19,7 +19,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg.h"
#include "tcg/tcg.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"


@ -24,7 +24,7 @@
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"


@ -24,7 +24,7 @@
#include "exec/log.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "linux-user/syscall_defs.h"


@ -22,7 +22,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "qemu/qemu-print.h"


@ -133,8 +133,6 @@ void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask)
int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
{


@ -13,7 +13,7 @@
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"


@ -689,10 +689,6 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
}
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _ring0
#define MMU_MODE1_SUFFIX _ring1
#define MMU_MODE2_SUFFIX _ring2
#define MMU_MODE3_SUFFIX _ring3
#define MMU_USER_IDX 3
static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)


@ -63,10 +63,11 @@
void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
/*
* Attempt the memory load; we don't care about the result but
* Probe the memory; we don't care about the result but
* only the side-effects (ie any MMU or other exception)
*/
cpu_ldub_code_ra(env, vaddr, GETPC());
probe_access(env, vaddr, 1, MMU_INST_FETCH,
cpu_mmu_index(env, true), GETPC());
}
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)

View File

@ -33,7 +33,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "exec/cpu_ldst.h"


@ -10,7 +10,7 @@
* See the COPYING file in the top-level directory for details.
*/
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
#include "qemu/bitops.h"
/* We're going to re-use TCGType in setting of the SF bit, which controls
@ -1541,7 +1541,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
}
#ifdef CONFIG_SOFTMMU
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* TCGMemOpIdx oi, uintptr_t ra)


@ -23,7 +23,7 @@
*/
#include "elf.h"
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
int arm_arch = __ARM_ARCH;
@ -1131,7 +1131,7 @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
}
#ifdef CONFIG_SOFTMMU
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)


@ -223,7 +223,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
* The x86 has a pretty strong memory ordering which only really
* allows for some stores to be re-ordered after loads.
*/
#include "tcg-mo.h"
#include "tcg/tcg-mo.h"
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)


@ -22,7 +22,7 @@
* THE SOFTWARE.
*/
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@ -1647,7 +1647,7 @@ static void tcg_out_nopn(TCGContext *s, int n)
}
#if defined(CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)


@ -1107,7 +1107,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
}
#if defined(CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
static void * const qemu_ld_helpers[16] = {
[MO_UB] = helper_ret_ldub_mmu,


@ -24,7 +24,7 @@
*/
#include "qemu/osdep.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#define CASE_OP_32_64(x) \
glue(glue(case INDEX_op_, x), _i32): \


@ -23,7 +23,7 @@
*/
#include "elf.h"
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
#if defined _CALL_DARWIN || defined __APPLE__
#define TCG_TARGET_CALL_DARWIN
@ -1845,7 +1845,7 @@ static const uint32_t qemu_exts_opc[4] = {
};
#if defined (CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)


@ -27,7 +27,7 @@
* THE SOFTWARE.
*/
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@ -921,7 +921,7 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
*/
#if defined(CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* TCGMemOpIdx oi, uintptr_t ra)


@ -29,7 +29,7 @@
#error "unsupported code generation mode"
#endif
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
#include "elf.h"
/* ??? The translation blocks produced by TCG are generally small enough to
@ -1536,7 +1536,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
}
#if defined(CONFIG_SOFTMMU)
#include "tcg-ldst.inc.c"
#include "../tcg-ldst.inc.c"
/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);


@ -22,7 +22,7 @@
* THE SOFTWARE.
*/
#include "tcg-pool.inc.c"
#include "../tcg-pool.inc.c"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {


@ -32,7 +32,7 @@ uintptr_t tci_tb_ptr;
TCGOpDef tcg_op_defs[] = {
#define DEF(s, oargs, iargs, cargs, flags) \
{ #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
#include "tcg-opc.h"
#include "tcg/tcg-opc.h"
#undef DEF
};
const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);


@ -18,11 +18,11 @@
*/
#include "qemu/osdep.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/main-loop.h"
#include "tcg-gvec-desc.h"
#include "tcg/tcg-gvec-desc.h"
#define MAX_UNROLL 4


@ -19,9 +19,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
/* Reduce the number of ifdefs below. This assumes that all uses of
TCGV_HIGH and TCGV_LOW are properly protected by a conditional that


@ -25,9 +25,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-mo.h"
#include "trace-tcg.h"
#include "trace/mem.h"
#include "exec/plugin-gen.h"


@ -48,7 +48,7 @@
#include "hw/boards.h"
#endif
#include "tcg-op.h"
#include "tcg/tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS ELFCLASS32


@ -30,7 +30,7 @@
#include "qemu-common.h"
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg-op.h"
#include "tcg/tcg-op.h"
/* Marker for missing code. */
#define TODO() \


@ -47,21 +47,4 @@ static inline uint16_t trace_mem_get_info(MemOp op,
mmu_idx);
}
/* Used by the atomic helpers */
static inline
uint16_t trace_mem_build_info_no_se_be(int size_shift, bool store,
TCGMemOpIdx oi)
{
return trace_mem_build_info(size_shift, false, MO_BE, store,
get_mmuidx(oi));
}
static inline
uint16_t trace_mem_build_info_no_se_le(int size_shift, bool store,
TCGMemOpIdx oi)
{
return trace_mem_build_info(size_shift, false, MO_LE, store,
get_mmuidx(oi));
}
#endif /* TRACE__MEM_INTERNAL_H */