Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20150911' into staging

queued tcg related patches

# gpg: Signature made Fri 11 Sep 2015 16:17:00 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-tcg-20150911:
  cpu-exec: introduce loop exit with restore function
  softmmu: remove now unused functions
  softmmu: add helper function to pass through retaddr
  tlb: Add "ifetch" argument to cpu_mmu_index()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 8f6e82e4ec
Committer: Peter Maydell, 2015-09-11 18:01:56 +01:00
38 changed files with 155 additions and 95 deletions

@@ -134,6 +134,15 @@ void cpu_loop_exit(CPUState *cpu)
     siglongjmp(cpu->jmp_env, 1);
 }
 
+void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
+{
+    if (pc) {
+        cpu_restore_state(cpu, pc);
+    }
+    cpu->current_tb = NULL;
+    siglongjmp(cpu->jmp_env, 1);
+}
+
 /* exit the current TB from a signal handler. The host registers are
    restored in a state compatible with the CPU emulator
  */
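A hedged sketch of the intended use: a helper that raises an exception can now fold the old two-step pattern (cpu_restore_state() followed by cpu_loop_exit()) into one call. The helper name, its trigger condition, and EXCP_ALIGN are all hypothetical here; only cpu_loop_exit_restore() and GETPC() come from the patch itself.

    /* Illustrative only: raise an exception from a runtime helper and
     * unwind guest state to the faulting instruction in one call. */
    void helper_check_align(CPUArchState *env, target_ulong addr, uint32_t mask)
    {
        if (addr & mask) {
            CPUState *cs = ENV_GET_CPU(env);
            cs->exception_index = EXCP_ALIGN;   /* hypothetical exception number */
            cpu_loop_exit_restore(cs, GETPC());
        }
    }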

@@ -452,7 +452,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     CPUState *cpu = ENV_GET_CPU(env1);
 
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    mmu_idx = cpu_mmu_index(env1);
+    mmu_idx = cpu_mmu_index(env1, true);
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                  (addr & TARGET_PAGE_MASK))) {
         cpu_ldub_code(env1, addr);
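The new boolean lets a front end report a different MMU index for instruction fetches than for data accesses; code-fetch paths such as this one pass true, data paths pass false. None of the targets converted in this pull use the flag yet, but a sketch of a target that could (CPUFooState and the CTL_/PSR_ names are hypothetical):

    static inline int cpu_mmu_index(CPUFooState *env, bool ifetch)
    {
        /* Hypothetical machine where fetch translation can be disabled
         * independently of data translation. */
        if (ifetch && !(env->ctl & CTL_IFETCH_MMU_ENABLED)) {
            return MMU_PHYS_IDX;
        }
        return (env->psr & PSR_USER) ? MMU_USER_IDX : MMU_KERNEL_IDX;
    }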

@@ -113,25 +113,6 @@
 /* The memory helpers for tcg-generated code need tcg_target_long etc. */
 #include "tcg.h"
 
-uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-void helper_stb_mmu(CPUArchState *env, target_ulong addr,
-                    uint8_t val, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr,
-                    uint16_t val, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr,
-                    uint32_t val, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr,
-                    uint64_t val, int mmu_idx);
-
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
 #ifdef MMU_MODE0_SUFFIX
 #define CPU_MMU_INDEX 0
 #define MEMSUFFIX MMU_MODE0_SUFFIX
@@ -363,7 +344,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #endif /* (NB_MMU_MODES > 12) */
 
 /* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
 #define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "exec/cpu_ldst_template.h"
@@ -379,7 +360,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #undef CPU_MMU_INDEX
 #undef MEMSUFFIX
 
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
 #define MEMSUFFIX _code
 #define SOFTMMU_CODE_ACCESS
 

@@ -27,20 +27,24 @@
 #define SUFFIX q
 #define USUFFIX q
 #define DATA_TYPE uint64_t
+#define SHIFT 3
 #elif DATA_SIZE == 4
 #define SUFFIX l
 #define USUFFIX l
 #define DATA_TYPE uint32_t
+#define SHIFT 2
 #elif DATA_SIZE == 2
 #define SUFFIX w
 #define USUFFIX uw
 #define DATA_TYPE uint16_t
 #define DATA_STYPE int16_t
+#define SHIFT 1
 #elif DATA_SIZE == 1
 #define SUFFIX b
 #define USUFFIX ub
 #define DATA_TYPE uint8_t
 #define DATA_STYPE int8_t
+#define SHIFT 0
 #else
 #error unsupported data size
 #endif
@@ -54,27 +58,36 @@
 #ifdef SOFTMMU_CODE_ACCESS
 #define ADDR_READ addr_code
 #define MMUSUFFIX _cmmu
+#define URETSUFFIX SUFFIX
+#define SRETSUFFIX SUFFIX
 #else
 #define ADDR_READ addr_read
 #define MMUSUFFIX _mmu
+#define URETSUFFIX USUFFIX
+#define SRETSUFFIX glue(s, SUFFIX)
 #endif
 
 /* generic load/store macros */
 
 static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                  target_ulong ptr,
+                                                  uintptr_t retaddr)
 {
     int page_index;
     RES_TYPE res;
     target_ulong addr;
     int mmu_idx;
+    TCGMemOpIdx oi;
 
     addr = ptr;
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+        oi = make_memop_idx(SHIFT, mmu_idx);
+        res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
+                                                               oi, retaddr);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
@@ -82,27 +95,43 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     return res;
 }
 
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+    return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
+}
+
 #if DATA_SIZE <= 2
 static inline int
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                  target_ulong ptr,
+                                                  uintptr_t retaddr)
 {
     int res, page_index;
     target_ulong addr;
     int mmu_idx;
+    TCGMemOpIdx oi;
 
     addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
-                               MMUSUFFIX)(env, addr, mmu_idx);
+        oi = make_memop_idx(SHIFT, mmu_idx);
+        res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
+                               MMUSUFFIX)(env, addr, oi, retaddr);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
     }
     return res;
 }
 
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+    return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
+}
 #endif
 
 #ifndef SOFTMMU_CODE_ACCESS
@@ -110,25 +139,36 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
 
 /* generic store macro */
 
 static inline void
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
-                                      RES_TYPE v)
+glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                 target_ulong ptr,
+                                                 RES_TYPE v, uintptr_t retaddr)
 {
     int page_index;
     target_ulong addr;
     int mmu_idx;
+    TCGMemOpIdx oi;
 
     addr = ptr;
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     mmu_idx = CPU_MMU_INDEX;
     if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                  (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
-        glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+        oi = make_memop_idx(SHIFT, mmu_idx);
+        glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
+                                                     retaddr);
     } else {
         uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
         glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
     }
 }
 
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+                                      RES_TYPE v)
+{
+    glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
+}
+
 #endif /* !SOFTMMU_CODE_ACCESS */
 
 #undef RES_TYPE
@@ -139,3 +179,6 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
 #undef DATA_SIZE
 #undef MMUSUFFIX
 #undef ADDR_READ
+#undef URETSUFFIX
+#undef SRETSUFFIX
+#undef SHIFT
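Each inclusion of this template now also emits a _ra variant of every accessor (cpu_ldl_data_ra, cpu_stw_data_ra, and so on), and the old names become thin wrappers that pass retaddr = 0. A hedged sketch of a target helper using the new entry points; helper_swap_word is hypothetical, while cpu_ldl_data_ra, cpu_stl_data_ra, GETPC(), and bswap32() are existing QEMU names:

    /* Passing GETPC() lets a TLB fault inside the access unwind to the
     * guest instruction that invoked this helper, instead of being
     * attributed to no translation block at all. */
    uint32_t helper_swap_word(CPUArchState *env, target_ulong addr)
    {
        uint32_t val = cpu_ldl_data_ra(env, addr, GETPC());
        cpu_stl_data_ra(env, addr, bswap32(val), GETPC());
        return val;
    }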

@@ -56,12 +56,28 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
     return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
 }
 
+static inline RES_TYPE
+glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                  target_ulong ptr,
+                                                  uintptr_t retaddr)
+{
+    return glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
+}
+
 #if DATA_SIZE <= 2
 static inline int
 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
 {
     return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
 }
+
+static inline int
+glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                  target_ulong ptr,
+                                                  uintptr_t retaddr)
+{
+    return glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
+}
 #endif
 
 #ifndef CODE_ACCESS
@@ -71,6 +87,15 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
 {
     glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
 }
+
+static inline void
+glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+                                                 target_ulong ptr,
+                                                 RES_TYPE v,
+                                                 uintptr_t retaddr)
+{
+    glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
+}
 #endif
 
 #undef RES_TYPE
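In user-only builds the _ra variants simply forward to the plain accessors and ignore retaddr, since there is no software TLB to fault through; helpers written against the _ra API, as in the sketch above, therefore compile unchanged for both softmmu and user-only configurations.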

@@ -90,6 +90,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                               int cflags);
 void cpu_exec_init(CPUState *cpu, Error **errp);
 void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
+void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
 #if !defined(CONFIG_USER_ONLY)
 bool qemu_in_vcpu_thread(void);
 

@@ -165,9 +165,6 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 }
 #endif
 
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
 WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -252,9 +249,6 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 }
 
 #if DATA_SIZE > 1
-#ifdef SOFTMMU_CODE_ACCESS
-static __attribute__((unused))
-#endif
 WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -335,14 +329,6 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 }
 #endif /* DATA_SIZE > 1 */
 
-DATA_TYPE
-glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         int mmu_idx)
-{
-    TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
-    return helper_te_ld_name (env, addr, oi, GETRA());
-}
-
 #ifndef SOFTMMU_CODE_ACCESS
 
 /* Provide signed versions of the load routines as well. We can of course
@@ -540,14 +526,6 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 }
 #endif /* DATA_SIZE > 1 */
 
-void
-glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
-                                         DATA_TYPE val, int mmu_idx)
-{
-    TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
-    helper_te_st_name(env, addr, val, oi, GETRA());
-}
-
 #if DATA_SIZE == 1
 /* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
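The deleted wrappers were the last entry points using the old (addr, mmu_idx) calling convention; callers now go through the retaddr-aware helpers directly. A hedged sketch of the equivalent call, where the MO_TEUL memop is illustrative and the surrounding variables are assumed to exist in the caller:

    /* Before: val = helper_ldl_mmu(env, addr, mmu_idx);
     * After: the caller builds the memop+index pair itself and passes
     * its own return address so faults unwind correctly. */
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
    uint32_t val = helper_ret_ldl_mmu(env, addr, oi, GETPC());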

@@ -376,7 +376,7 @@ enum {
     PS_USER_MODE = 8
 };
 
-static inline int cpu_mmu_index(CPUAlphaState *env)
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
 {
     if (env->pal_mode) {
         return MMU_KERNEL_IDX;

@@ -2878,7 +2878,7 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
 
     ctx.tb = tb;
     ctx.pc = pc_start;
-    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.mem_idx = cpu_mmu_index(env, false);
     ctx.implver = env->implver;
     ctx.singlestep_enabled = cs->singlestep_enabled;

@@ -1678,7 +1678,7 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
 }
 
 /* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
 {
     int el = arm_current_el(env);
 
@@ -1911,7 +1911,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                    << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }
 
-    *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+    *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
 
     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
      * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State

@@ -6892,7 +6892,7 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     uint32_t fsr;
     MemTxAttrs attrs = {};
 
-    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
                         &attrs, &prot, &page_size, &fsr);
 
     if (ret) {
@@ -7057,7 +7057,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
         void *hostaddr[maxidx];
         int try, i;
-        unsigned mmu_idx = cpu_mmu_index(env);
+        unsigned mmu_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
 
         for (try = 0; try < 2; try++) {

@@ -233,7 +233,7 @@ enum {
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUCRISState *env)
+static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
 {
     return !!(env->pregs[PR_CCS] & U_FLAG);
 }

@@ -1083,7 +1083,7 @@ static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
 
 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */
@@ -1097,7 +1097,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                      unsigned int size, int sign)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */
@@ -1112,7 +1112,7 @@ static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
                        unsigned int size)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */

@@ -96,7 +96,7 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
 static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
                           unsigned int size)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */

@@ -1199,7 +1199,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define MMU_KSMAP_IDX 0
 #define MMU_USER_IDX 1
 #define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
 {
     return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
         (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))

@@ -7942,7 +7942,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
     /* select memory access functions */
     dc->mem_index = 0;
     if (flags & HF_SOFTMMU_MASK) {
-        dc->mem_index = cpu_mmu_index(env);
+        dc->mem_index = cpu_mmu_index(env, false);
     }
     dc->cpuid_features = env->features[FEAT_1_EDX];
     dc->cpuid_ext_features = env->features[FEAT_1_ECX];

@@ -34,7 +34,7 @@ typedef struct CPULM32State CPULM32State;
 #define NB_MMU_MODES 1
 #define TARGET_PAGE_BITS 12
 
-static inline int cpu_mmu_index(CPULM32State *env)
+static inline int cpu_mmu_index(CPULM32State *env, bool ifetch)
 {
     return 0;
 }

@@ -223,7 +223,7 @@ void register_m68k_insns (CPUM68KState *env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env)
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
 {
     return (env->sr & SR_S) == 0 ? 1 : 0;
 }

@@ -309,7 +309,7 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
 #define MMU_USER_IDX 2
 /* See NB_MMU_MODES further up the file. */
 
-static inline int cpu_mmu_index (CPUMBState *env)
+static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
 {
     /* Are we in nommu mode?. */
     if (!(env->sregs[SR_MSR] & MSR_VM))

@@ -279,7 +279,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
             }
 
             hit = mmu_translate(&env->mmu, &lu,
-                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
+                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
             if (hit) {
                 env->mmu.regs[MMU_R_TLBX] = lu.idx;
             } else

@@ -418,7 +418,7 @@ static void dec_msr(DisasContext *dc)
     CPUState *cs = CPU(dc->cpu);
     TCGv t0, t1;
     unsigned int sr, to, rn;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     sr = dc->imm & ((1 << 14) - 1);
     to = dc->imm & (1 << 14);
@@ -730,7 +730,7 @@ static void dec_bit(DisasContext *dc)
     CPUState *cs = CPU(dc->cpu);
     TCGv t0;
     unsigned int op;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     op = dc->ir & ((1 << 9) - 1);
     switch (op) {
@@ -994,7 +994,7 @@ static void dec_load(DisasContext *dc)
      * address and if that succeeds we write into the destination reg.
      */
     v = tcg_temp_new();
-    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
 
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
         tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
@@ -1072,7 +1072,7 @@ static void dec_store(DisasContext *dc)
            this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads. */
         tval = tcg_temp_new();
-        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
         tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
         write_carryi(dc, 0);
@@ -1123,7 +1123,7 @@ static void dec_store(DisasContext *dc)
             break;
         }
     }
-    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
 
     /* Verify alignment if needed. */
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
@@ -1219,7 +1219,7 @@ static void dec_bcc(DisasContext *dc)
 static void dec_br(DisasContext *dc)
 {
     unsigned int dslot, link, abs, mbar;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     dslot = dc->ir & (1 << 20);
     abs = dc->ir & (1 << 19);
@@ -1351,7 +1351,7 @@ static inline void do_rte(DisasContext *dc)
 static void dec_rts(DisasContext *dc)
 {
     unsigned int b_bit, i_bit, e_bit;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
 
     i_bit = dc->ir & (1 << 21);
     b_bit = dc->ir & (1 << 22);
@@ -1523,7 +1523,7 @@ static void dec_null(DisasContext *dc)
 /* Insns connected to FSL or AXI stream attached devices. */
 static void dec_stream(DisasContext *dc)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
     TCGv_i32 t_id, t_ctrl;
     int ctrl;

@@ -634,7 +634,7 @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
 #define MMU_MODE1_SUFFIX _super
 #define MMU_MODE2_SUFFIX _user
 #define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
 {
     return env->hflags & MIPS_HFLAG_KSU;
 }

@@ -3629,7 +3629,7 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
 #if !defined(CONFIG_USER_ONLY)
 #define MEMOP_IDX(DF) \
     TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
-                                    cpu_mmu_index(env));
+                                    cpu_mmu_index(env, false));
 #else
 #define MEMOP_IDX(DF)
 #endif
@@ -3685,7 +3685,7 @@ void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
                             target_ulong addr) \
 { \
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
-    int mmu_idx = cpu_mmu_index(env); \
+    int mmu_idx = cpu_mmu_index(env, false); \
     int i; \
     MEMOP_IDX(DF) \
     ensure_writable_pages(env, addr, mmu_idx, GETRA()); \

@@ -127,7 +127,7 @@ int cpu_moxie_signal_handler(int host_signum, void *pinfo,
 #define cpu_gen_code cpu_moxie_gen_code
 #define cpu_signal_handler cpu_moxie_signal_handler
 
-static inline int cpu_mmu_index(CPUMoxieState *env)
+static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
 {
     return 0;
 }

@@ -403,7 +403,7 @@ static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
     *flags = (env->flags & D_FLAG);
 }
 
-static inline int cpu_mmu_index(CPUOpenRISCState *env)
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
 {
     if (!(env->sr & SR_IME)) {
         return MMU_NOMMU_IDX;

@@ -1653,7 +1653,7 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
     dc->ppc = pc_start;
     dc->pc = pc_start;
     dc->flags = cpu->env.cpucfgr;
-    dc->mem_idx = cpu_mmu_index(&cpu->env);
+    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
     dc->synced_flags = dc->tb_flags = tb->flags;
     dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
     dc->singlestep_enabled = cs->singlestep_enabled;

@@ -1250,7 +1250,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
 #define MMU_MODE1_SUFFIX _kernel
 #define MMU_MODE2_SUFFIX _hypv
 #define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env)
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
 {
     return env->mmu_idx;
 }

@@ -308,7 +308,7 @@ static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
 #define MMU_SECONDARY_IDX 1
 #define MMU_HOME_IDX 2
 
-static inline int cpu_mmu_index (CPUS390XState *env)
+static inline int cpu_mmu_index (CPUS390XState *env, bool ifetch)
 {
     switch (env->psw.mask & PSW_MASK_ASC) {
     case PSW_ASC_PRIMARY:

@@ -69,7 +69,7 @@ static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
 static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                         uint32_t l)
 {
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
 
     while (l > 0) {
         void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
@@ -92,7 +92,7 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
 static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                          uint32_t l)
 {
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
 
     while (l > 0) {
         void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);

@@ -235,7 +235,7 @@ void cpu_load_tlb(CPUSH4State * env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUSH4State *env)
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
 {
     return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
 }

@@ -642,7 +642,7 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1)
 }
 #endif
 
-static inline int cpu_mmu_index(CPUSPARCState *env1)
+static inline int cpu_mmu_index(CPUSPARCState *env1, bool ifetch)
 {
 #if defined(CONFIG_USER_ONLY)
     return MMU_USER_IDX;

@@ -849,7 +849,7 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     SPARCCPU *cpu = SPARC_CPU(cs);
     CPUSPARCState *env = &cpu->env;
     hwaddr phys_addr;
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
     MemoryRegionSection section;
 
     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {

@@ -5234,7 +5234,7 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
     last_pc = dc->pc;
     dc->npc = (target_ulong) tb->cs_base;
     dc->cc_op = CC_OP_DYNAMIC;
-    dc->mem_idx = cpu_mmu_index(env);
+    dc->mem_idx = cpu_mmu_index(env, false);
     dc->def = env->def;
     dc->fpu_enabled = tb_fpu_enabled(tb->flags);
     dc->address_mask_32bit = tb_am_enabled(tb->flags);

@@ -350,7 +350,7 @@ void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf);
 #define cpu_signal_handler cpu_tricore_signal_handler
 #define cpu_list tricore_cpu_list
 
-static inline int cpu_mmu_index(CPUTriCoreState *env)
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
 {
     return 0;
 }

@@ -8287,7 +8287,7 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
     ctx.tb = tb;
     ctx.singlestep_enabled = cs->singlestep_enabled;
     ctx.bstate = BS_NONE;
-    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.mem_idx = cpu_mmu_index(env, false);
 
     tcg_clear_temp_count();
     gen_tb_start(tb);

@@ -131,7 +131,7 @@ int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index(CPUUniCore32State *env)
+static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
 {
     return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
 }

@@ -492,7 +492,7 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
 #define MMU_MODE2_SUFFIX _ring2
 #define MMU_MODE3_SUFFIX _ring3
 
-static inline int cpu_mmu_index(CPUXtensaState *env)
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
 {
     return xtensa_get_cring(env);
 }

@@ -986,25 +986,48 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
 
+uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr);
+
 /* Temporary aliases until backends are converted. */
 #ifdef TARGET_WORDS_BIGENDIAN
 # define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
 # define helper_ret_lduw_mmu  helper_be_lduw_mmu
 # define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
 # define helper_ret_ldul_mmu  helper_be_ldul_mmu
 # define helper_ret_ldl_mmu   helper_be_ldul_mmu
 # define helper_ret_ldq_mmu   helper_be_ldq_mmu
 # define helper_ret_stw_mmu   helper_be_stw_mmu
 # define helper_ret_stl_mmu   helper_be_stl_mmu
 # define helper_ret_stq_mmu   helper_be_stq_mmu
+# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
+# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
+# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
 #else
 # define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
 # define helper_ret_lduw_mmu  helper_le_lduw_mmu
 # define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
 # define helper_ret_ldul_mmu  helper_le_ldul_mmu
 # define helper_ret_ldl_mmu   helper_le_ldul_mmu
 # define helper_ret_ldq_mmu   helper_le_ldq_mmu
 # define helper_ret_stw_mmu   helper_le_stw_mmu
 # define helper_ret_stl_mmu   helper_le_stl_mmu
 # define helper_ret_stq_mmu   helper_le_stq_mmu
+# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
+# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
+# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif

#endif /* CONFIG_SOFTMMU */
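With these aliases in place, slow-path code loads can be written once against the helper_ret_* names and pick up the correct endian variant per target. A hedged sketch (the wrapper function is hypothetical; helper_ret_ldl_cmmu, make_memop_idx, and MO_TEUL are the names declared or used above):

    /* Sketch only: fetch a 32-bit value via the code-access ("cmmu") slow
     * path. A retaddr of 0 means any fault is reported against the current
     * CPU state rather than unwound from a host return address. */
    static uint32_t fetch_insn_word(CPUArchState *env, target_ulong pc, int mmu_idx)
    {
        TCGMemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
        return helper_ret_ldl_cmmu(env, pc, oi, 0);
    }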