-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJaafR3AAoJEPMMOL0/L7483m4P/3GQil19zuj6EUbukR1kRv3T
 kYK9ciuGRkJ7qv3n16RgiobUREK8AziRiPi7fvC/T82wlhNbVIvOm/EQZj/yTlto
 Z18NLhYgSPu+zH6hymJVm/+ORpfXOzhgEZWXm2X6TIa+JMjXKdnn4+bxUCZSlnDM
 lQURK2XFv3F5didCMKDiFaAfWUFp3RUlfvo50n22hCIJa/GznsgHxnHbexHdrEXh
 NF5ES0pDMfoIG938XvLmJ6X8O+//G+02DrbRvsUBV948Lvx0bLCm7tiRC+kGRYmU
 i/QzcQzh6Zr2A4wR1WtItwQzYSJoJFp2/rxebXfNEPS5pMkR8UXtmtQ1WUMD2Xl3
 FITA5rHjw1W1pCOsq3vDkU4SyMjppKSyc8bA7iFHWSl/M1q7MHlx611TdMbYuXsX
 +GOOBVEYdD4VrLpTbcyYtA/fR1kJjPHDzdQx49mFLjPdVa+d5gXhxSJjRGPzvzgl
 O4WmARQvFyI6dumzYxVdYH2tJ2o5YCI17lv/HNcxlDXAW2Xa0peFqmxa/O1Bw6E3
 ayBixnIQUzAzS/fYfDtgWL2VyhruRiA0FTdW4OpXvdRRYsxmRFT1uzGMvAPSkBE2
 OSJmO0V/oRCHt/qmiMjCvDf/npo1GwRt/MADhGFa87d+D/gu6KlAzB1HhQ5t+yh4
 5vvbKRSK5Z2TZtUgniIA
 =YOpd
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/vivier/tags/m68k-for-2.12-pull-request' into staging

# gpg: Signature made Thu 25 Jan 2018 15:15:03 GMT
# gpg:                using RSA key 0xF30C38BD3F2FBE3C
# gpg: Good signature from "Laurent Vivier <lvivier@redhat.com>"
# gpg:                 aka "Laurent Vivier <laurent@vivier.eu>"
# gpg:                 aka "Laurent Vivier (Red Hat) <lvivier@redhat.com>"
# Primary key fingerprint: CD2F 75DD C8E3 A4DC 2E4F  5173 F30C 38BD 3F2F BE3C

* remotes/vivier/tags/m68k-for-2.12-pull-request:
  target/m68k: add HMP command "info tlb"
  target/m68k: add pflush/ptest
  target/m68k: add moves
  target/m68k: add index parameter to gen_load()/gen_store() and Co.
  target/m68k: add Transparent Translation
  target/m68k: add MC68040 MMU
  accel/tcg: add size parameter in tlb_fill()
  target/m68k: fix TCG variable double free

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2018-01-26 10:08:53 +00:00
commit fca3dad533
63 changed files with 1168 additions and 204 deletions
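
Nearly all of the cross-target churn below stems from a single interface change: tlb_fill() and the per-target *_cpu_handle_mmu_fault() hooks gain a size argument, which the new 68040 MMU code uses to record the faulting access size in its special status word. As a rough sketch (the prototype is the one added below; the wrapper body shows the forwarding pattern every target adopts, alpha being the simplest example):

void tlb_fill(CPUState *cpu, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

/* typical per-target forwarding (alpha shown; the other targets are analogous) */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret = alpha_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);

    if (unlikely(ret != 0)) {
        /* exception index and error code were already set by the fault handler */
        cpu_loop_exit_restore(cs, retaddr);
    }
}

Callers that do not know the access size, such as the user-mode signal path and hppa's probe-only store, simply pass 0, as the hunks below show.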


@ -880,7 +880,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
(addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
if (!VICTIM_TLB_HIT(addr_read, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_INST_FETCH, mmu_idx, 0);
tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
}
}
iotlbentry = &env->iotlb[mmu_idx][index];
@ -928,7 +928,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
* Otherwise the function will return, and there will be a valid
* entry in the TLB for this access.
*/
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@ -938,7 +938,8 @@ void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
/* TLB entry is for a different page */
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
mmu_idx, retaddr);
}
}
}
@ -981,7 +982,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
mmu_idx, retaddr);
}
tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
}
@ -995,7 +997,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Let the guest notice RMW on a write-only page. */
if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr);
tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
mmu_idx, retaddr);
/* Since we don't support reads and writes to different addresses,
and we do have the proper page loaded for write, this shouldn't
ever return. But just in case, handle via stop-the-world. */


@ -124,7 +124,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@ -191,7 +191,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
@ -283,7 +283,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
}
@ -316,7 +317,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
&& !VICTIM_TLB_HIT(addr_write, page2)) {
tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@ -359,7 +360,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
if (!VICTIM_TLB_HIT(addr_write, addr)) {
tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}
tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
}
@ -392,7 +394,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
&& !VICTIM_TLB_HIT(addr_write, page2)) {
tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
mmu_idx, retaddr);
}


@ -149,7 +149,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
cc = CPU_GET_CLASS(cpu);
/* see if it is an MMU fault */
g_assert(cc->handle_mmu_fault);
ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
if (ret == 0) {
/* The MMU fault was handled without causing real CPU fault.


@ -216,7 +216,7 @@ Show PCI information.
ETEXI
#if defined(TARGET_I386) || defined(TARGET_SH4) || defined(TARGET_SPARC) || \
defined(TARGET_PPC) || defined(TARGET_XTENSA)
defined(TARGET_PPC) || defined(TARGET_XTENSA) || defined(TARGET_M68K)
{
.name = "tlb",
.args_type = "",


@ -253,7 +253,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
uintptr_t retaddr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
@ -436,8 +436,8 @@ void tb_lock_reset(void);
struct MemoryRegion *iotlb_to_region(CPUState *cpu,
hwaddr index, MemTxAttrs attrs);
void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
void tlb_fill(CPUState *cpu, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
#endif


@ -174,7 +174,7 @@ typedef struct CPUClass {
Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
int mmu_index);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,


@ -479,7 +479,7 @@ void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf);
is returned if the signal was handled by the virtual CPU. */
int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc);
int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);


@ -103,7 +103,7 @@ void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
}
#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
@ -247,7 +247,7 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return (fail >= 0 ? -1 : phys);
}
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, int rw,
int mmu_idx)
{
AlphaCPU *cpu = ALPHA_CPU(cs);


@ -69,12 +69,12 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = alpha_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = alpha_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret != 0)) {
/* Exception index and error code are already set */
cpu_loop_exit_restore(cs, retaddr);


@ -1689,8 +1689,8 @@ static Property arm_cpu_properties[] = {
};
#ifdef CONFIG_USER_ONLY
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;


@ -169,8 +169,8 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
bool ret;
ARMMMUFaultInfo fi = {};


@ -283,7 +283,7 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
return !!(env->pregs[PR_CCS] & U_FLAG);
}
int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
/* Support function regs. */


@ -53,7 +53,7 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
cris_cpu_do_interrupt(cs);
}
int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
CRISCPU *cpu = CRIS_CPU(cs);
@ -76,7 +76,7 @@ static void cris_shift_ccs(CPUCRISState *env)
env->pregs[PR_CCS] = ccs;
}
int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
CRISCPU *cpu = CRIS_CPU(cs);


@ -41,8 +41,8 @@
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
@ -50,7 +50,7 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
env->pc, env->pregs[PR_EDA], (void *)retaddr);
ret = cris_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = cris_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */


@ -132,7 +132,8 @@ void cpu_hppa_loaded_fr0(CPUHPPAState *env);
#define cpu_signal_handler cpu_hppa_signal_handler
int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, int midx);
int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int midx);
int hppa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_do_interrupt(CPUState *cpu);


@ -65,7 +65,7 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
env->psw_cb = cb;
}
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
HPPACPU *cpu = HPPA_CPU(cs);


@ -139,7 +139,7 @@ static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
/* Nothing is stored, but protection is checked and the
cacheline is marked dirty. */
#ifndef CONFIG_USER_ONLY
probe_write(env, addr, cpu_mmu_index(env, 0), ra);
probe_write(env, addr, 0, cpu_mmu_index(env, 0), ra);
#endif
break;
}


@ -1504,7 +1504,7 @@ void host_cpuid(uint32_t function, uint32_t count,
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
/* helper.c */
int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
int is_write, int mmu_idx);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);


@ -138,7 +138,7 @@ void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr
}
#if defined(CONFIG_USER_ONLY)
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);
@ -162,7 +162,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
* 0 = nothing more to do
* 1 = generate PF fault
*/
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
int is_write1, int mmu_idx)
{
X86CPU *cpu = X86_CPU(cs);


@ -199,12 +199,12 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
* from generated code or from helper.c)
*/
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = x86_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;


@ -263,7 +263,7 @@ bool lm32_cpu_do_semihosting(CPUState *cs);
#define cpu_list lm32_cpu_list
#define cpu_signal_handler cpu_lm32_signal_handler
int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
#include "exec/cpu-all.h"


@ -25,7 +25,7 @@
#include "exec/semihost.h"
#include "exec/log.h"
int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
LM32CPU *cpu = LM32_CPU(cs);


@ -144,12 +144,12 @@ uint32_t HELPER(rcsr_jrx)(CPULM32State *env)
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = lm32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = lm32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);


@ -269,9 +269,9 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
#else
#if defined(CONFIG_SOFTMMU)
cc->do_unassigned_access = m68k_cpu_unassigned_access;
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = m68k_cpu_disas_set_info;


@ -76,6 +76,14 @@
#define EXCP_RTE 0x100
#define EXCP_HALT_INSN 0x101
#define M68K_DTTR0 0
#define M68K_DTTR1 1
#define M68K_ITTR0 2
#define M68K_ITTR1 3
#define M68K_MAX_TTR 2
#define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index]
#define NB_MMU_MODES 2
#define TARGET_INSN_START_EXTRA_WORDS 1
@ -116,6 +124,14 @@ typedef struct CPUM68KState {
/* MMU status. */
struct {
uint32_t ar;
uint32_t ssw;
/* 68040 */
uint16_t tcr;
uint32_t urp;
uint32_t srp;
bool fault;
uint32_t ttr[4];
uint32_t mmusr;
} mmu;
/* Control registers. */
@ -123,6 +139,8 @@ typedef struct CPUM68KState {
uint32_t mbar;
uint32_t rambar0;
uint32_t cacr;
uint32_t sfc;
uint32_t dfc;
int pending_vector;
int pending_level;
@ -226,6 +244,104 @@ typedef enum {
#define M68K_USP 1
#define M68K_ISP 2
/* bits for 68040 special status word */
#define M68K_CP_040 0x8000
#define M68K_CU_040 0x4000
#define M68K_CT_040 0x2000
#define M68K_CM_040 0x1000
#define M68K_MA_040 0x0800
#define M68K_ATC_040 0x0400
#define M68K_LK_040 0x0200
#define M68K_RW_040 0x0100
#define M68K_SIZ_040 0x0060
#define M68K_TT_040 0x0018
#define M68K_TM_040 0x0007
#define M68K_TM_040_DATA 0x0001
#define M68K_TM_040_CODE 0x0002
#define M68K_TM_040_SUPER 0x0004
/* bits for 68040 write back status word */
#define M68K_WBV_040 0x80
#define M68K_WBSIZ_040 0x60
#define M68K_WBBYT_040 0x20
#define M68K_WBWRD_040 0x40
#define M68K_WBLNG_040 0x00
#define M68K_WBTT_040 0x18
#define M68K_WBTM_040 0x07
/* bus access size codes */
#define M68K_BA_SIZE_MASK 0x60
#define M68K_BA_SIZE_BYTE 0x20
#define M68K_BA_SIZE_WORD 0x40
#define M68K_BA_SIZE_LONG 0x00
#define M68K_BA_SIZE_LINE 0x60
/* bus access transfer type codes */
#define M68K_BA_TT_MOVE16 0x08
/* bits for 68040 MMU status register (mmusr) */
#define M68K_MMU_B_040 0x0800
#define M68K_MMU_G_040 0x0400
#define M68K_MMU_U1_040 0x0200
#define M68K_MMU_U0_040 0x0100
#define M68K_MMU_S_040 0x0080
#define M68K_MMU_CM_040 0x0060
#define M68K_MMU_M_040 0x0010
#define M68K_MMU_WP_040 0x0004
#define M68K_MMU_T_040 0x0002
#define M68K_MMU_R_040 0x0001
#define M68K_MMU_SR_MASK_040 (M68K_MMU_G_040 | M68K_MMU_U1_040 | \
M68K_MMU_U0_040 | M68K_MMU_S_040 | \
M68K_MMU_CM_040 | M68K_MMU_M_040 | \
M68K_MMU_WP_040)
/* bits for 68040 MMU Translation Control Register */
#define M68K_TCR_ENABLED 0x8000
#define M68K_TCR_PAGE_8K 0x4000
/* bits for 68040 MMU Table Descriptor / Page Descriptor / TTR */
#define M68K_DESC_WRITEPROT 0x00000004
#define M68K_DESC_USED 0x00000008
#define M68K_DESC_MODIFIED 0x00000010
#define M68K_DESC_CACHEMODE 0x00000060
#define M68K_DESC_CM_WRTHRU 0x00000000
#define M68K_DESC_CM_COPYBK 0x00000020
#define M68K_DESC_CM_SERIAL 0x00000040
#define M68K_DESC_CM_NCACHE 0x00000060
#define M68K_DESC_SUPERONLY 0x00000080
#define M68K_DESC_USERATTR 0x00000300
#define M68K_DESC_USERATTR_SHIFT 8
#define M68K_DESC_GLOBAL 0x00000400
#define M68K_DESC_URESERVED 0x00000800
#define M68K_ROOT_POINTER_ENTRIES 128
#define M68K_4K_PAGE_MASK (~0xff)
#define M68K_POINTER_BASE(entry) (entry & ~0x1ff)
#define M68K_ROOT_INDEX(addr) ((address >> 23) & 0x1fc)
#define M68K_POINTER_INDEX(addr) ((address >> 16) & 0x1fc)
#define M68K_4K_PAGE_BASE(entry) (next & M68K_4K_PAGE_MASK)
#define M68K_4K_PAGE_INDEX(addr) ((address >> 10) & 0xfc)
#define M68K_8K_PAGE_MASK (~0x7f)
#define M68K_8K_PAGE_BASE(entry) (next & M68K_8K_PAGE_MASK)
#define M68K_8K_PAGE_INDEX(addr) ((address >> 11) & 0x7c)
#define M68K_UDT_VALID(entry) (entry & 2)
#define M68K_PDT_VALID(entry) (entry & 3)
#define M68K_PDT_INDIRECT(entry) ((entry & 3) == 2)
#define M68K_INDIRECT_POINTER(addr) (addr & ~3)
#define M68K_TTS_POINTER_SHIFT 18
#define M68K_TTS_ROOT_SHIFT 25
/* bits for 68040 MMU Transparent Translation Registers */
#define M68K_TTR_ADDR_BASE 0xff000000
#define M68K_TTR_ADDR_MASK 0x00ff0000
#define M68K_TTR_ADDR_MASK_SHIFT 8
#define M68K_TTR_ENABLED 0x00008000
#define M68K_TTR_SFIELD 0x00006000
#define M68K_TTR_SFIELD_USER 0x0000
#define M68K_TTR_SFIELD_SUPER 0x2000
/* m68k Control Registers */
/* ColdFire */
@ -387,16 +503,25 @@ void m68k_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void register_m68k_insns (CPUM68KState *env);
#ifdef CONFIG_USER_ONLY
/* Coldfire Linux uses 8k pages
* and m68k linux uses 4k pages
* use the smaller one
* use the smallest one
*/
#define TARGET_PAGE_BITS 12
#else
/* Smallest TLB entry size is 1k. */
#define TARGET_PAGE_BITS 10
#endif
enum {
/* 1 bit to define user level / supervisor access */
ACCESS_SUPER = 0x01,
/* 1 bit to indicate direction */
ACCESS_STORE = 0x02,
/* 1 bit to indicate debug access */
ACCESS_DEBUG = 0x04,
/* PTEST instruction */
ACCESS_PTEST = 0x08,
/* Type of instruction that generated the access */
ACCESS_CODE = 0x10, /* Code fetch access */
ACCESS_DATA = 0x20, /* Data load/store access */
};
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32
@ -412,24 +537,42 @@ void register_m68k_insns (CPUM68KState *env);
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 1
static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
{
return (env->sr & SR_S) == 0 ? 1 : 0;
}
int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr,
bool is_write, bool is_exec, int is_asi,
unsigned size);
#include "exec/cpu-all.h"
/* TB flags */
#define TB_FLAGS_MACSR 0x0f
#define TB_FLAGS_MSR_S_BIT 13
#define TB_FLAGS_MSR_S (1 << TB_FLAGS_MSR_S_BIT)
#define TB_FLAGS_SFC_S_BIT 14
#define TB_FLAGS_SFC_S (1 << TB_FLAGS_SFC_S_BIT)
#define TB_FLAGS_DFC_S_BIT 15
#define TB_FLAGS_DFC_S (1 << TB_FLAGS_DFC_S_BIT)
static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
*flags = (env->sr & SR_S) /* Bit 13 */
| ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
*flags = (env->macsr >> 4) & TB_FLAGS_MACSR;
if (env->sr & SR_S) {
*flags |= TB_FLAGS_MSR_S;
*flags |= (env->sfc << (TB_FLAGS_SFC_S_BIT - 2)) & TB_FLAGS_SFC_S;
*flags |= (env->dfc << (TB_FLAGS_DFC_S_BIT - 2)) & TB_FLAGS_DFC_S;
}
}
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUM68KState *env);
#endif
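
A note on the flag encoding above (editor's reading, not part of the patch): sfc and dfc hold 3-bit 680x0 function codes, and bit 2 (value 4) is what distinguishes supervisor from user address space; it is the same test HELPER(ptest) applies later in the diff with env->dfc & 4. For a supervisor data space code of 5 the shift works out as:

    sfc = 5 (binary 101; bit 2 set, so supervisor)
    5 << (TB_FLAGS_SFC_S_BIT - 2) = 5 << 12 = 0x5000
    0x5000 & TB_FLAGS_SFC_S       = 0x5000 & 0x4000 = 0x4000   (bit 14 set)

Only the supervisor/user distinction reaches the TB flags; the SFC_INDEX() and DFC_INDEX() macros added at the end of the diff then turn that single bit into MMU_KERNEL_IDX or MMU_USER_IDX when translating moves.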


@ -203,6 +203,12 @@ void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
switch (reg) {
/* MC680[1234]0 */
case M68K_CR_SFC:
env->sfc = val & 7;
return;
case M68K_CR_DFC:
env->dfc = val & 7;
return;
case M68K_CR_VBR:
env->vbr = val;
return;
@ -212,6 +218,18 @@ void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
m68k_switch_sp(env);
return;
/* MC680[34]0 */
case M68K_CR_TC:
env->mmu.tcr = val;
return;
case M68K_CR_MMUSR:
env->mmu.mmusr = val;
return;
case M68K_CR_SRP:
env->mmu.srp = val;
return;
case M68K_CR_URP:
env->mmu.urp = val;
return;
case M68K_CR_USP:
env->sp[M68K_USP] = val;
return;
@ -221,6 +239,19 @@ void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
case M68K_CR_ISP:
env->sp[M68K_ISP] = val;
return;
/* MC68040/MC68LC040 */
case M68K_CR_ITT0:
env->mmu.ttr[M68K_ITTR0] = val;
return;
case M68K_CR_ITT1:
env->mmu.ttr[M68K_ITTR1] = val;
return;
case M68K_CR_DTT0:
env->mmu.ttr[M68K_DTTR0] = val;
return;
case M68K_CR_DTT1:
env->mmu.ttr[M68K_DTTR1] = val;
return;
}
cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n",
reg, val);
@ -232,18 +263,39 @@ uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg)
switch (reg) {
/* MC680[1234]0 */
case M68K_CR_SFC:
return env->sfc;
case M68K_CR_DFC:
return env->dfc;
case M68K_CR_VBR:
return env->vbr;
/* MC680[234]0 */
case M68K_CR_CACR:
return env->cacr;
/* MC680[34]0 */
case M68K_CR_TC:
return env->mmu.tcr;
case M68K_CR_MMUSR:
return env->mmu.mmusr;
case M68K_CR_SRP:
return env->mmu.srp;
case M68K_CR_USP:
return env->sp[M68K_USP];
case M68K_CR_MSP:
return env->sp[M68K_SSP];
case M68K_CR_ISP:
return env->sp[M68K_ISP];
/* MC68040/MC68LC040 */
case M68K_CR_URP:
return env->mmu.urp;
case M68K_CR_ITT0:
return env->mmu.ttr[M68K_ITTR0];
case M68K_CR_ITT1:
return env->mmu.ttr[M68K_ITTR1];
case M68K_CR_DTT0:
return env->mmu.ttr[M68K_DTTR0];
case M68K_CR_DTT1:
return env->mmu.ttr[M68K_DTTR1];
}
cpu_abort(CPU(cpu), "Unimplemented control register read 0x%x\n",
reg);
@ -308,7 +360,7 @@ void m68k_switch_sp(CPUM68KState *env)
#if defined(CONFIG_USER_ONLY)
int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
M68kCPU *cpu = M68K_CPU(cs);
@ -320,23 +372,507 @@ int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
#else
/* MMU */
/* MMU: 68040 only */
/* TODO: This will need fixing once the MMU is implemented. */
hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
static void print_address_zone(FILE *f, fprintf_function cpu_fprintf,
uint32_t logical, uint32_t physical,
uint32_t size, int attr)
{
return addr;
cpu_fprintf(f, "%08x - %08x -> %08x - %08x %c ",
logical, logical + size - 1,
physical, physical + size - 1,
attr & 4 ? 'W' : '-');
size >>= 10;
if (size < 1024) {
cpu_fprintf(f, "(%d KiB)\n", size);
} else {
size >>= 10;
if (size < 1024) {
cpu_fprintf(f, "(%d MiB)\n", size);
} else {
size >>= 10;
cpu_fprintf(f, "(%d GiB)\n", size);
}
}
}
int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
static void dump_address_map(FILE *f, fprintf_function cpu_fprintf,
CPUM68KState *env, uint32_t root_pointer)
{
int i, j, k;
int tic_size, tic_shift;
uint32_t tib_mask;
uint32_t tia, tib, tic;
uint32_t logical = 0xffffffff, physical = 0xffffffff;
uint32_t first_logical = 0xffffffff, first_physical = 0xffffffff;
uint32_t last_logical, last_physical;
int32_t size;
int last_attr = -1, attr = -1;
M68kCPU *cpu = m68k_env_get_cpu(env);
CPUState *cs = CPU(cpu);
if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
/* 8k page */
tic_size = 32;
tic_shift = 13;
tib_mask = M68K_8K_PAGE_MASK;
} else {
/* 4k page */
tic_size = 64;
tic_shift = 12;
tib_mask = M68K_4K_PAGE_MASK;
}
for (i = 0; i < M68K_ROOT_POINTER_ENTRIES; i++) {
tia = ldl_phys(cs->as, M68K_POINTER_BASE(root_pointer) + i * 4);
if (!M68K_UDT_VALID(tia)) {
continue;
}
for (j = 0; j < M68K_ROOT_POINTER_ENTRIES; j++) {
tib = ldl_phys(cs->as, M68K_POINTER_BASE(tia) + j * 4);
if (!M68K_UDT_VALID(tib)) {
continue;
}
for (k = 0; k < tic_size; k++) {
tic = ldl_phys(cs->as, (tib & tib_mask) + k * 4);
if (!M68K_PDT_VALID(tic)) {
continue;
}
if (M68K_PDT_INDIRECT(tic)) {
tic = ldl_phys(cs->as, M68K_INDIRECT_POINTER(tic));
}
last_logical = logical;
logical = (i << M68K_TTS_ROOT_SHIFT) |
(j << M68K_TTS_POINTER_SHIFT) |
(k << tic_shift);
last_physical = physical;
physical = tic & ~((1 << tic_shift) - 1);
last_attr = attr;
attr = tic & ((1 << tic_shift) - 1);
if ((logical != (last_logical + (1 << tic_shift))) ||
(physical != (last_physical + (1 << tic_shift))) ||
(attr & 4) != (last_attr & 4)) {
if (first_logical != 0xffffffff) {
size = last_logical + (1 << tic_shift) -
first_logical;
print_address_zone(f, cpu_fprintf, first_logical,
first_physical, size, last_attr);
}
first_logical = logical;
first_physical = physical;
}
}
}
}
if (first_logical != logical || (attr & 4) != (last_attr & 4)) {
size = logical + (1 << tic_shift) - first_logical;
print_address_zone(f, cpu_fprintf, first_logical, first_physical, size,
last_attr);
}
}
#define DUMP_CACHEFLAGS(a) \
switch (a & M68K_DESC_CACHEMODE) { \
case M68K_DESC_CM_WRTHRU: /* cachable, write-through */ \
cpu_fprintf(f, "T"); \
break; \
case M68K_DESC_CM_COPYBK: /* cachable, copyback */ \
cpu_fprintf(f, "C"); \
break; \
case M68K_DESC_CM_SERIAL: /* noncachable, serialized */ \
cpu_fprintf(f, "S"); \
break; \
case M68K_DESC_CM_NCACHE: /* noncachable */ \
cpu_fprintf(f, "N"); \
break; \
}
static void dump_ttr(FILE *f, fprintf_function cpu_fprintf, uint32_t ttr)
{
if ((ttr & M68K_TTR_ENABLED) == 0) {
cpu_fprintf(f, "disabled\n");
return;
}
cpu_fprintf(f, "Base: 0x%08x Mask: 0x%08x Control: ",
ttr & M68K_TTR_ADDR_BASE,
(ttr & M68K_TTR_ADDR_MASK) << M68K_TTR_ADDR_MASK_SHIFT);
switch (ttr & M68K_TTR_SFIELD) {
case M68K_TTR_SFIELD_USER:
cpu_fprintf(f, "U");
break;
case M68K_TTR_SFIELD_SUPER:
cpu_fprintf(f, "S");
break;
default:
cpu_fprintf(f, "*");
break;
}
DUMP_CACHEFLAGS(ttr);
if (ttr & M68K_DESC_WRITEPROT) {
cpu_fprintf(f, "R");
} else {
cpu_fprintf(f, "W");
}
cpu_fprintf(f, " U: %d\n", (ttr & M68K_DESC_USERATTR) >>
M68K_DESC_USERATTR_SHIFT);
}
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUM68KState *env)
{
if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) {
cpu_fprintf(f, "Translation disabled\n");
return;
}
cpu_fprintf(f, "Page Size: ");
if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
cpu_fprintf(f, "8kB\n");
} else {
cpu_fprintf(f, "4kB\n");
}
cpu_fprintf(f, "MMUSR: ");
if (env->mmu.mmusr & M68K_MMU_B_040) {
cpu_fprintf(f, "BUS ERROR\n");
} else {
cpu_fprintf(f, "Phy=%08x Flags: ", env->mmu.mmusr & 0xfffff000);
/* flags found on the page descriptor */
if (env->mmu.mmusr & M68K_MMU_G_040) {
cpu_fprintf(f, "G"); /* Global */
} else {
cpu_fprintf(f, ".");
}
if (env->mmu.mmusr & M68K_MMU_S_040) {
cpu_fprintf(f, "S"); /* Supervisor */
} else {
cpu_fprintf(f, ".");
}
if (env->mmu.mmusr & M68K_MMU_M_040) {
cpu_fprintf(f, "M"); /* Modified */
} else {
cpu_fprintf(f, ".");
}
if (env->mmu.mmusr & M68K_MMU_WP_040) {
cpu_fprintf(f, "W"); /* Write protect */
} else {
cpu_fprintf(f, ".");
}
if (env->mmu.mmusr & M68K_MMU_T_040) {
cpu_fprintf(f, "T"); /* Transparent */
} else {
cpu_fprintf(f, ".");
}
if (env->mmu.mmusr & M68K_MMU_R_040) {
cpu_fprintf(f, "R"); /* Resident */
} else {
cpu_fprintf(f, ".");
}
cpu_fprintf(f, " Cache: ");
DUMP_CACHEFLAGS(env->mmu.mmusr);
cpu_fprintf(f, " U: %d\n", (env->mmu.mmusr >> 8) & 3);
cpu_fprintf(f, "\n");
}
cpu_fprintf(f, "ITTR0: ");
dump_ttr(f, cpu_fprintf, env->mmu.ttr[M68K_ITTR0]);
cpu_fprintf(f, "ITTR1: ");
dump_ttr(f, cpu_fprintf, env->mmu.ttr[M68K_ITTR1]);
cpu_fprintf(f, "DTTR0: ");
dump_ttr(f, cpu_fprintf, env->mmu.ttr[M68K_DTTR0]);
cpu_fprintf(f, "DTTR1: ");
dump_ttr(f, cpu_fprintf, env->mmu.ttr[M68K_DTTR1]);
cpu_fprintf(f, "SRP: 0x%08x\n", env->mmu.srp);
dump_address_map(f, cpu_fprintf, env, env->mmu.srp);
cpu_fprintf(f, "URP: 0x%08x\n", env->mmu.urp);
dump_address_map(f, cpu_fprintf, env, env->mmu.urp);
}
static int check_TTR(uint32_t ttr, int *prot, target_ulong addr,
int access_type)
{
uint32_t base, mask;
/* check if transparent translation is enabled */
if ((ttr & M68K_TTR_ENABLED) == 0) {
return 0;
}
/* check mode access */
switch (ttr & M68K_TTR_SFIELD) {
case M68K_TTR_SFIELD_USER:
/* match only if user */
if ((access_type & ACCESS_SUPER) != 0) {
return 0;
}
break;
case M68K_TTR_SFIELD_SUPER:
/* match only if supervisor */
if ((access_type & ACCESS_SUPER) == 0) {
return 0;
}
break;
default:
/* all other values disable mode matching (FC2) */
break;
}
/* check address matching */
base = ttr & M68K_TTR_ADDR_BASE;
mask = (ttr & M68K_TTR_ADDR_MASK) ^ M68K_TTR_ADDR_MASK;
mask <<= M68K_TTR_ADDR_MASK_SHIFT;
if ((addr & mask) != (base & mask)) {
return 0;
}
*prot = PAGE_READ | PAGE_EXEC;
if ((ttr & M68K_DESC_WRITEPROT) == 0) {
*prot |= PAGE_WRITE;
}
return 1;
}
static int get_physical_address(CPUM68KState *env, hwaddr *physical,
int *prot, target_ulong address,
int access_type, target_ulong *page_size)
{
M68kCPU *cpu = m68k_env_get_cpu(env);
CPUState *cs = CPU(cpu);
uint32_t entry;
uint32_t next;
target_ulong page_mask;
bool debug = access_type & ACCESS_DEBUG;
int page_bits;
int i;
/* Transparent Translation (physical = logical) */
for (i = 0; i < M68K_MAX_TTR; i++) {
if (check_TTR(env->mmu.TTR(access_type, i),
prot, address, access_type)) {
if (access_type & ACCESS_PTEST) {
/* Transparent Translation Register bit */
env->mmu.mmusr = M68K_MMU_T_040 | M68K_MMU_R_040;
}
*physical = address & TARGET_PAGE_MASK;
*page_size = TARGET_PAGE_SIZE;
return 0;
}
}
/* Page Table Root Pointer */
*prot = PAGE_READ | PAGE_WRITE;
if (access_type & ACCESS_CODE) {
*prot |= PAGE_EXEC;
}
if (access_type & ACCESS_SUPER) {
next = env->mmu.srp;
} else {
next = env->mmu.urp;
}
/* Root Index */
entry = M68K_POINTER_BASE(next) | M68K_ROOT_INDEX(address);
next = ldl_phys(cs->as, entry);
if (!M68K_UDT_VALID(next)) {
return -1;
}
if (!(next & M68K_DESC_USED) && !debug) {
stl_phys(cs->as, entry, next | M68K_DESC_USED);
}
if (next & M68K_DESC_WRITEPROT) {
if (access_type & ACCESS_PTEST) {
env->mmu.mmusr |= M68K_MMU_WP_040;
}
*prot &= ~PAGE_WRITE;
if (access_type & ACCESS_STORE) {
return -1;
}
}
/* Pointer Index */
entry = M68K_POINTER_BASE(next) | M68K_POINTER_INDEX(address);
next = ldl_phys(cs->as, entry);
if (!M68K_UDT_VALID(next)) {
return -1;
}
if (!(next & M68K_DESC_USED) && !debug) {
stl_phys(cs->as, entry, next | M68K_DESC_USED);
}
if (next & M68K_DESC_WRITEPROT) {
if (access_type & ACCESS_PTEST) {
env->mmu.mmusr |= M68K_MMU_WP_040;
}
*prot &= ~PAGE_WRITE;
if (access_type & ACCESS_STORE) {
return -1;
}
}
/* Page Index */
if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
entry = M68K_8K_PAGE_BASE(next) | M68K_8K_PAGE_INDEX(address);
} else {
entry = M68K_4K_PAGE_BASE(next) | M68K_4K_PAGE_INDEX(address);
}
next = ldl_phys(cs->as, entry);
if (!M68K_PDT_VALID(next)) {
return -1;
}
if (M68K_PDT_INDIRECT(next)) {
next = ldl_phys(cs->as, M68K_INDIRECT_POINTER(next));
}
if (access_type & ACCESS_STORE) {
if (next & M68K_DESC_WRITEPROT) {
if (!(next & M68K_DESC_USED) && !debug) {
stl_phys(cs->as, entry, next | M68K_DESC_USED);
}
} else if ((next & (M68K_DESC_MODIFIED | M68K_DESC_USED)) !=
(M68K_DESC_MODIFIED | M68K_DESC_USED) && !debug) {
stl_phys(cs->as, entry,
next | (M68K_DESC_MODIFIED | M68K_DESC_USED));
}
} else {
if (!(next & M68K_DESC_USED) && !debug) {
stl_phys(cs->as, entry, next | M68K_DESC_USED);
}
}
if (env->mmu.tcr & M68K_TCR_PAGE_8K) {
page_bits = 13;
} else {
page_bits = 12;
}
*page_size = 1 << page_bits;
page_mask = ~(*page_size - 1);
*physical = next & page_mask;
if (access_type & ACCESS_PTEST) {
env->mmu.mmusr |= next & M68K_MMU_SR_MASK_040;
env->mmu.mmusr |= *physical & 0xfffff000;
env->mmu.mmusr |= M68K_MMU_R_040;
}
if (next & M68K_DESC_WRITEPROT) {
*prot &= ~PAGE_WRITE;
if (access_type & ACCESS_STORE) {
return -1;
}
}
if (next & M68K_DESC_SUPERONLY) {
if ((access_type & ACCESS_SUPER) == 0) {
return -1;
}
}
return 0;
}
hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
hwaddr phys_addr;
int prot;
int access_type;
target_ulong page_size;
if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) {
/* MMU disabled */
return addr;
}
access_type = ACCESS_DATA | ACCESS_DEBUG;
if (env->sr & SR_S) {
access_type |= ACCESS_SUPER;
}
if (get_physical_address(env, &phys_addr, &prot,
addr, access_type, &page_size) != 0) {
return -1;
}
return phys_addr;
}
int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
hwaddr physical;
int prot;
int access_type;
int ret;
target_ulong page_size;
address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
if ((env->mmu.tcr & M68K_TCR_ENABLED) == 0) {
/* MMU disabled */
tlb_set_page(cs, address & TARGET_PAGE_MASK,
address & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
return 0;
}
if (rw == 2) {
access_type = ACCESS_CODE;
rw = 0;
} else {
access_type = ACCESS_DATA;
if (rw) {
access_type |= ACCESS_STORE;
}
}
if (mmu_idx != MMU_USER_IDX) {
access_type |= ACCESS_SUPER;
}
ret = get_physical_address(&cpu->env, &physical, &prot,
address, access_type, &page_size);
if (ret == 0) {
address &= TARGET_PAGE_MASK;
physical += address & (page_size - 1);
tlb_set_page(cs, address, physical,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
}
/* page fault */
env->mmu.ssw = M68K_ATC_040;
switch (size) {
case 1:
env->mmu.ssw |= M68K_BA_SIZE_BYTE;
break;
case 2:
env->mmu.ssw |= M68K_BA_SIZE_WORD;
break;
case 4:
env->mmu.ssw |= M68K_BA_SIZE_LONG;
break;
}
if (access_type & ACCESS_SUPER) {
env->mmu.ssw |= M68K_TM_040_SUPER;
}
if (access_type & ACCESS_CODE) {
env->mmu.ssw |= M68K_TM_040_CODE;
} else {
env->mmu.ssw |= M68K_TM_040_DATA;
}
if (!(access_type & ACCESS_STORE)) {
env->mmu.ssw |= M68K_RW_040;
}
env->mmu.ar = address;
cs->exception_index = EXCP_ACCESS;
return 1;
}
/* Notify CPU of a pending interrupt. Prioritization and vectoring should
@ -781,6 +1317,58 @@ void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc)
}
#if defined(CONFIG_SOFTMMU)
void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
{
M68kCPU *cpu = m68k_env_get_cpu(env);
CPUState *cs = CPU(cpu);
hwaddr physical;
int access_type;
int prot;
int ret;
target_ulong page_size;
access_type = ACCESS_PTEST;
if (env->dfc & 4) {
access_type |= ACCESS_SUPER;
}
if ((env->dfc & 3) == 2) {
access_type |= ACCESS_CODE;
}
if (!is_read) {
access_type |= ACCESS_STORE;
}
env->mmu.mmusr = 0;
env->mmu.ssw = 0;
ret = get_physical_address(env, &physical, &prot, addr,
access_type, &page_size);
if (ret == 0) {
addr &= TARGET_PAGE_MASK;
physical += addr & (page_size - 1);
tlb_set_page(cs, addr, physical,
prot, access_type & ACCESS_SUPER ?
MMU_KERNEL_IDX : MMU_USER_IDX, page_size);
}
}
void HELPER(pflush)(CPUM68KState *env, uint32_t addr, uint32_t opmode)
{
M68kCPU *cpu = m68k_env_get_cpu(env);
switch (opmode) {
case 0: /* Flush page entry if not global */
case 1: /* Flush page entry */
tlb_flush_page(CPU(cpu), addr);
break;
case 2: /* Flush all except global entries */
tlb_flush(CPU(cpu));
break;
case 3: /* Flush all entries */
tlb_flush(CPU(cpu));
break;
}
}
void HELPER(reset)(CPUM68KState *env)
{
/* FIXME: reset all except CPU */
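
One detail in the check_TTR()/get_physical_address() hunk above that is easy to misread is the transparent-translation mask handling: a TTR's mask byte (bits 23-16) marks which of the compared address bits 31-24 are to be ignored, so the code inverts that byte within its field and shifts it up by 8 to build the compare mask. A worked example with illustrative register contents (not taken from the patch):

    ttr  = 0x403fe000   (base byte 0x40, mask byte 0x3f, enable bit set, S-field 11 so both modes match)
    base = ttr & M68K_TTR_ADDR_BASE = 0x40000000
    mask = ((ttr & M68K_TTR_ADDR_MASK) ^ M68K_TTR_ADDR_MASK) << M68K_TTR_ADDR_MASK_SHIFT
         = 0x00c00000 << 8 = 0xc0000000

Every address whose top two bits are 01, i.e. 0x40000000 through 0x7fffffff, then satisfies (addr & mask) == (base & mask) and is mapped straight through with physical equal to logical.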


@ -101,5 +101,7 @@ DEF_HELPER_3(chk, void, env, s32, s32)
DEF_HELPER_4(chk2, void, env, s32, s32, s32)
#if defined(CONFIG_SOFTMMU)
DEF_HELPER_3(ptest, void, env, i32, i32)
DEF_HELPER_3(pflush, void, env, i32, i32)
DEF_HELPER_FLAGS_1(reset, TCG_CALL_NO_RWG, void, env)
#endif


@ -8,6 +8,19 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/hmp-target.h"
#include "monitor/monitor.h"
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
CPUArchState *env1 = mon_get_cpu_env();
if (!env1) {
monitor_printf(mon, "No CPU available\n");
return;
}
dump_mmu((FILE *)mon, (fprintf_function)monitor_printf, env1);
}
static const MonitorDef monitor_defs[] = {
{ "d0", offsetof(CPUM68KState, dregs[0]) },
@ -31,6 +44,15 @@ static const MonitorDef monitor_defs[] = {
{ "ssp", offsetof(CPUM68KState, sp[0]) },
{ "usp", offsetof(CPUM68KState, sp[1]) },
{ "isp", offsetof(CPUM68KState, sp[2]) },
{ "sfc", offsetof(CPUM68KState, sfc) },
{ "dfc", offsetof(CPUM68KState, dfc) },
{ "urp", offsetof(CPUM68KState, mmu.urp) },
{ "srp", offsetof(CPUM68KState, mmu.srp) },
{ "dttr0", offsetof(CPUM68KState, mmu.ttr[M68K_DTTR0]) },
{ "dttr1", offsetof(CPUM68KState, mmu.ttr[M68K_DTTR1]) },
{ "ittr0", offsetof(CPUM68KState, mmu.ttr[M68K_ITTR0]) },
{ "ittr1", offsetof(CPUM68KState, mmu.ttr[M68K_ITTR1]) },
{ "mmusr", offsetof(CPUM68KState, mmu.mmusr) },
{ NULL },
};
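
With the monitor hook above in place the new command can be tried straight from the HMP prompt; the output comes from the dump_mmu() code shown earlier. As a usage illustration only (this is the exact text printed while the MMU is still disabled):

    (qemu) info tlb
    Translation disabled

Once 68040 translation is enabled it instead prints the page size, a decoded MMUSR, the four transparent-translation registers, and the address maps rooted at SRP and URP.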


@ -39,12 +39,12 @@ static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = m68k_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = m68k_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
@ -360,7 +360,49 @@ static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
sp = env->aregs[7];
sp &= ~1;
if (cs->exception_index == EXCP_ADDRESS) {
if (cs->exception_index == EXCP_ACCESS) {
if (env->mmu.fault) {
cpu_abort(cs, "DOUBLE MMU FAULT\n");
}
env->mmu.fault = true;
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* push data 3 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* push data 2 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* push data 1 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 1 / push data 0 */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 1 address */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 2 data */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 2 address */
sp -= 4;
cpu_stl_kernel(env, sp, 0); /* write back 3 data */
sp -= 4;
cpu_stl_kernel(env, sp, env->mmu.ar); /* write back 3 address */
sp -= 4;
cpu_stl_kernel(env, sp, env->mmu.ar); /* fault address */
sp -= 2;
cpu_stw_kernel(env, sp, 0); /* write back 1 status */
sp -= 2;
cpu_stw_kernel(env, sp, 0); /* write back 2 status */
sp -= 2;
cpu_stw_kernel(env, sp, 0); /* write back 3 status */
sp -= 2;
cpu_stw_kernel(env, sp, env->mmu.ssw); /* special status word */
sp -= 4;
cpu_stl_kernel(env, sp, env->mmu.ar); /* effective address */
do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
env->mmu.fault = false;
if (qemu_loglevel_mask(CPU_LOG_INT)) {
qemu_log(" "
"ssw: %08x ea: %08x sfc: %d dfc: %d\n",
env->mmu.ssw, env->mmu.ar, env->sfc, env->dfc);
}
} else if (cs->exception_index == EXCP_ADDRESS) {
do_stack_frame(env, &sp, 2, oldsr, 0, retaddr);
} else if (cs->exception_index == EXCP_ILLEGAL ||
cs->exception_index == EXCP_DIV0 ||
@ -408,6 +450,57 @@ static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
do_interrupt_all(env, 1);
}
void m68k_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
bool is_exec, int is_asi, unsigned size)
{
M68kCPU *cpu = M68K_CPU(cs);
CPUM68KState *env = &cpu->env;
#ifdef DEBUG_UNASSIGNED
qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
addr, is_write, is_exec);
#endif
if (env == NULL) {
/* when called from gdb, env is NULL */
return;
}
if (m68k_feature(env, M68K_FEATURE_M68040)) {
env->mmu.mmusr = 0;
env->mmu.ssw |= M68K_ATC_040;
/* FIXME: manage MMU table access error */
env->mmu.ssw &= ~M68K_TM_040;
if (env->sr & SR_S) { /* SUPERVISOR */
env->mmu.ssw |= M68K_TM_040_SUPER;
}
if (is_exec) { /* instruction or data */
env->mmu.ssw |= M68K_TM_040_CODE;
} else {
env->mmu.ssw |= M68K_TM_040_DATA;
}
env->mmu.ssw &= ~M68K_BA_SIZE_MASK;
switch (size) {
case 1:
env->mmu.ssw |= M68K_BA_SIZE_BYTE;
break;
case 2:
env->mmu.ssw |= M68K_BA_SIZE_WORD;
break;
case 4:
env->mmu.ssw |= M68K_BA_SIZE_LONG;
break;
}
if (!is_write) {
env->mmu.ssw |= M68K_RW_040;
}
env->mmu.ar = addr;
cs->exception_index = EXCP_ACCESS;
cpu_loop_exit(cs);
}
}
#endif
bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)


@ -115,7 +115,6 @@ typedef struct DisasContext {
int is_jmp;
CCOp cc_op; /* Current CC operation */
int cc_op_synced;
int user;
struct TranslationBlock *tb;
int singlestep_enabled;
TCGv_i64 mactmp;
@ -178,7 +177,11 @@ static void do_writebacks(DisasContext *s)
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) s->user
#define IS_USER(s) (!(s->tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->tb->flags & TB_FLAGS_SFC_S) ? \
MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->tb->flags & TB_FLAGS_DFC_S) ? \
MMU_KERNEL_IDX : MMU_USER_IDX)
#endif
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
@ -281,10 +284,10 @@ static inline void gen_addr_fault(DisasContext *s)
/* Generate a load from the specified address. Narrow values are
sign extended to full register width. */
static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
int sign, int index)
{
TCGv tmp;
int index = IS_USER(s);
tmp = tcg_temp_new_i32();
switch(opsize) {
case OS_BYTE:
@ -309,9 +312,9 @@ static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
}
/* Generate a store. */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
int index)
{
int index = IS_USER(s);
switch(opsize) {
case OS_BYTE:
tcg_gen_qemu_st8(val, addr, index);
@ -336,13 +339,13 @@ typedef enum {
/* Generate an unsigned load if VAL is 0 a signed load if val is -1,
otherwise generate a store. */
static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
ea_what what)
ea_what what, int index)
{
if (what == EA_STORE) {
gen_store(s, opsize, addr, val);
gen_store(s, opsize, addr, val, index);
return store_dummy;
} else {
return gen_load(s, opsize, addr, what == EA_LOADS);
return gen_load(s, opsize, addr, what == EA_LOADS, index);
}
}
@ -464,7 +467,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
}
if ((ext & 3) != 0) {
/* memory indirect */
base = gen_load(s, OS_LONG, add, 0);
base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
if ((ext & 0x44) == 4) {
add = gen_addr_index(s, ext, tmp);
tcg_gen_add_i32(tmp, add, base);
@ -793,7 +796,8 @@ static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
a write otherwise it is a read (0 == sign extend, -1 == zero extend).
ADDRP is non-null for readwrite operands. */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
int opsize, TCGv val, TCGv *addrp, ea_what what)
int opsize, TCGv val, TCGv *addrp, ea_what what,
int index)
{
TCGv reg, tmp, result;
int32_t offset;
@ -817,10 +821,10 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
}
case 2: /* Indirect register */
reg = get_areg(s, reg0);
return gen_ldst(s, opsize, reg, val, what);
return gen_ldst(s, opsize, reg, val, what, index);
case 3: /* Indirect postincrement. */
reg = get_areg(s, reg0);
result = gen_ldst(s, opsize, reg, val, what);
result = gen_ldst(s, opsize, reg, val, what, index);
if (what == EA_STORE || !addrp) {
TCGv tmp = tcg_temp_new();
if (reg0 == 7 && opsize == OS_BYTE &&
@ -844,7 +848,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
*addrp = tmp;
}
}
result = gen_ldst(s, opsize, tmp, val, what);
result = gen_ldst(s, opsize, tmp, val, what, index);
if (what == EA_STORE || !addrp) {
delay_set_areg(s, reg0, tmp, false);
}
@ -863,7 +867,7 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
*addrp = tmp;
}
}
return gen_ldst(s, opsize, tmp, val, what);
return gen_ldst(s, opsize, tmp, val, what, index);
case 7: /* Other */
switch (reg0) {
case 0: /* Absolute short. */
@ -904,11 +908,11 @@ static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
}
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
int opsize, TCGv val, TCGv *addrp, ea_what what)
int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
{
int mode = extract32(insn, 3, 3);
int reg0 = REG(insn, 0);
return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
}
static TCGv_ptr gen_fp_ptr(int freg)
@ -941,11 +945,11 @@ static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
tcg_temp_free_i64(t64);
}
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
int index)
{
TCGv tmp;
TCGv_i64 t64;
int index = IS_USER(s);
t64 = tcg_temp_new_i64();
tmp = tcg_temp_new();
@ -969,7 +973,6 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
case OS_DOUBLE:
tcg_gen_qemu_ld64(t64, addr, index);
gen_helper_extf64(cpu_env, fp, t64);
tcg_temp_free_i64(t64);
break;
case OS_EXTENDED:
if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
@ -996,11 +999,11 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
tcg_temp_free_i64(t64);
}
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
int index)
{
TCGv tmp;
TCGv_i64 t64;
int index = IS_USER(s);
t64 = tcg_temp_new_i64();
tmp = tcg_temp_new();
@ -1051,17 +1054,18 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
}
static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
TCGv_ptr fp, ea_what what)
TCGv_ptr fp, ea_what what, int index)
{
if (what == EA_STORE) {
gen_store_fp(s, opsize, addr, fp);
gen_store_fp(s, opsize, addr, fp, index);
} else {
gen_load_fp(s, opsize, addr, fp);
gen_load_fp(s, opsize, addr, fp, index);
}
}
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
int reg0, int opsize, TCGv_ptr fp, ea_what what)
int reg0, int opsize, TCGv_ptr fp, ea_what what,
int index)
{
TCGv reg, addr, tmp;
TCGv_i64 t64;
@ -1109,11 +1113,11 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
return -1;
case 2: /* Indirect register */
addr = get_areg(s, reg0);
gen_ldst_fp(s, opsize, addr, fp, what);
gen_ldst_fp(s, opsize, addr, fp, what, index);
return 0;
case 3: /* Indirect postincrement. */
addr = cpu_aregs[reg0];
gen_ldst_fp(s, opsize, addr, fp, what);
gen_ldst_fp(s, opsize, addr, fp, what, index);
tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
return 0;
case 4: /* Indirect predecrememnt. */
@ -1121,7 +1125,7 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
if (IS_NULL_QREG(addr)) {
return -1;
}
gen_ldst_fp(s, opsize, addr, fp, what);
gen_ldst_fp(s, opsize, addr, fp, what, index);
tcg_gen_mov_i32(cpu_aregs[reg0], addr);
return 0;
case 5: /* Indirect displacement. */
@ -1131,7 +1135,7 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
if (IS_NULL_QREG(addr)) {
return -1;
}
gen_ldst_fp(s, opsize, addr, fp, what);
gen_ldst_fp(s, opsize, addr, fp, what, index);
return 0;
case 7: /* Other */
switch (reg0) {
@ -1200,11 +1204,11 @@ static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
}
static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
int opsize, TCGv_ptr fp, ea_what what)
int opsize, TCGv_ptr fp, ea_what what, int index)
{
int mode = extract32(insn, 3, 3);
int reg0 = REG(insn, 0);
return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what);
return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
}
typedef struct {
@ -1424,7 +1428,7 @@ static void gen_lookup_tb(DisasContext *s)
#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
op_sign ? EA_LOADS : EA_LOADU); \
op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \
if (IS_NULL_QREG(result)) { \
gen_addr_fault(s); \
return; \
@ -1432,7 +1436,8 @@ static void gen_lookup_tb(DisasContext *s)
} while (0)
#define DEST_EA(env, insn, opsize, val, addrp) do { \
TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \
EA_STORE, IS_USER(s)); \
if (IS_NULL_QREG(ea_result)) { \
gen_addr_fault(s); \
return; \
@ -1769,13 +1774,14 @@ DISAS_INSN(abcd_mem)
/* Indirect pre-decrement load (mode 4) */
src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
NULL_QREG, NULL, EA_LOADU);
NULL_QREG, NULL, EA_LOADU, IS_USER(s));
dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
NULL_QREG, &addr, EA_LOADU);
NULL_QREG, &addr, EA_LOADU, IS_USER(s));
bcd_add(dest, src);
gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
EA_STORE, IS_USER(s));
bcd_flags(dest);
}
@ -1805,13 +1811,14 @@ DISAS_INSN(sbcd_mem)
/* Indirect pre-decrement load (mode 4) */
src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
NULL_QREG, NULL, EA_LOADU);
NULL_QREG, NULL, EA_LOADU, IS_USER(s));
dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
NULL_QREG, &addr, EA_LOADU);
NULL_QREG, &addr, EA_LOADU, IS_USER(s));
bcd_sub(dest, src);
gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
EA_STORE, IS_USER(s));
bcd_flags(dest);
}
@ -1948,7 +1955,7 @@ static void gen_push(DisasContext *s, TCGv val)
tmp = tcg_temp_new();
tcg_gen_subi_i32(tmp, QREG_SP, 4);
gen_store(s, OS_LONG, tmp, val);
gen_store(s, OS_LONG, tmp, val, IS_USER(s));
tcg_gen_mov_i32(QREG_SP, tmp);
tcg_temp_free(tmp);
}
@ -2017,7 +2024,7 @@ DISAS_INSN(movem)
/* memory to register */
for (i = 0; i < 16; i++) {
if (mask & (1 << i)) {
r[i] = gen_load(s, opsize, addr, 1);
r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
tcg_gen_add_i32(addr, addr, incr);
}
}
@ -2049,10 +2056,10 @@ DISAS_INSN(movem)
*/
tmp = tcg_temp_new();
tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
gen_store(s, opsize, addr, tmp);
gen_store(s, opsize, addr, tmp, IS_USER(s));
tcg_temp_free(tmp);
} else {
gen_store(s, opsize, addr, mreg(i));
gen_store(s, opsize, addr, mreg(i), IS_USER(s));
}
}
}
@ -2060,7 +2067,7 @@ DISAS_INSN(movem)
} else {
for (i = 0; i < 16; i++) {
if (mask & (1 << i)) {
gen_store(s, opsize, addr, mreg(i));
gen_store(s, opsize, addr, mreg(i), IS_USER(s));
tcg_gen_add_i32(addr, addr, incr);
}
}
@ -2780,7 +2787,7 @@ static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
reg = AREG(insn, 0);
tmp = tcg_temp_new();
tcg_gen_subi_i32(tmp, QREG_SP, 4);
gen_store(s, OS_LONG, tmp, reg);
gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
if ((insn & 7) != 7) {
tcg_gen_mov_i32(reg, tmp);
}
@ -2813,7 +2820,7 @@ DISAS_INSN(unlk)
src = tcg_temp_new();
reg = AREG(insn, 0);
tcg_gen_mov_i32(src, reg);
tmp = gen_load(s, OS_LONG, src, 0);
tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
tcg_gen_mov_i32(reg, tmp);
tcg_gen_addi_i32(QREG_SP, src, 4);
tcg_temp_free(src);
@ -2840,7 +2847,7 @@ DISAS_INSN(rtd)
TCGv tmp;
int16_t offset = read_im16(env, s);
tmp = gen_load(s, OS_LONG, QREG_SP, 0);
tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
gen_jmp(s, tmp);
}
@ -2849,7 +2856,7 @@ DISAS_INSN(rts)
{
TCGv tmp;
tmp = gen_load(s, OS_LONG, QREG_SP, 0);
tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
gen_jmp(s, tmp);
}
@ -3085,15 +3092,15 @@ DISAS_INSN(subx_mem)
addr_src = AREG(insn, 0);
tcg_gen_subi_i32(addr_src, addr_src, opsize);
src = gen_load(s, opsize, addr_src, 1);
src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
addr_dest = AREG(insn, 9);
tcg_gen_subi_i32(addr_dest, addr_dest, opsize);
dest = gen_load(s, opsize, addr_dest, 1);
dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
gen_subx(s, src, dest, opsize);
gen_store(s, opsize, addr_dest, QREG_CC_N);
gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
DISAS_INSN(mov3q)
@ -3145,10 +3152,10 @@ DISAS_INSN(cmpm)
/* Post-increment load (mode 3) from Ay. */
src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
NULL_QREG, NULL, EA_LOADS);
NULL_QREG, NULL, EA_LOADS, IS_USER(s));
/* Post-increment load (mode 3) from Ax. */
dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
NULL_QREG, NULL, EA_LOADS);
NULL_QREG, NULL, EA_LOADS, IS_USER(s));
gen_update_cc_cmp(s, dst, src, opsize);
}
@ -3291,15 +3298,15 @@ DISAS_INSN(addx_mem)
addr_src = AREG(insn, 0);
tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
src = gen_load(s, opsize, addr_src, 1);
src = gen_load(s, opsize, addr_src, 1, IS_USER(s));
addr_dest = AREG(insn, 9);
tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
dest = gen_load(s, opsize, addr_dest, 1);
dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));
gen_addx(s, src, dest, opsize);
gen_store(s, opsize, addr_dest, QREG_CC_N);
gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
@ -4329,9 +4336,9 @@ DISAS_INSN(chk2)
addr2 = tcg_temp_new();
tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
bound1 = gen_load(s, opsize, addr1, 1);
bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
tcg_temp_free(addr1);
bound2 = gen_load(s, opsize, addr2, 1);
bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));
tcg_temp_free(addr2);
reg = tcg_temp_new();
@ -4449,6 +4456,64 @@ DISAS_INSN(move_from_sr)
}
#if defined(CONFIG_SOFTMMU)
DISAS_INSN(moves)
{
int opsize;
uint16_t ext;
TCGv reg;
TCGv addr;
int extend;
if (IS_USER(s)) {
gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
ext = read_im16(env, s);
opsize = insn_opsize(insn);
if (ext & 0x8000) {
/* address register */
reg = AREG(ext, 12);
extend = 1;
} else {
/* data register */
reg = DREG(ext, 12);
extend = 0;
}
addr = gen_lea(env, s, insn, opsize);
if (IS_NULL_QREG(addr)) {
gen_addr_fault(s);
return;
}
if (ext & 0x0800) {
/* from reg to ea */
gen_store(s, opsize, addr, reg, DFC_INDEX(s));
} else {
/* from ea to reg */
TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
if (extend) {
gen_ext(reg, tmp, opsize, 1);
} else {
gen_partset_reg(opsize, reg, tmp);
}
}
switch (extract32(insn, 3, 3)) {
case 3: /* Indirect postincrement. */
tcg_gen_addi_i32(AREG(insn, 0), addr,
REG(insn, 0) == 7 && opsize == OS_BYTE
? 2
: opsize_bytes(opsize));
break;
    case 4: /* Indirect predecrement. */
tcg_gen_mov_i32(AREG(insn, 0), addr);
break;
}
}
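
DISAS_INSN(moves) passes SFC_INDEX(s)/DFC_INDEX(s) rather than IS_USER(s): MOVES reads memory through the space named by the SFC control register and writes through the one named by DFC. A small self-contained sketch of that selection, using the function-code values from the 680x0 manuals (the struct and helper below are illustrative only, not QEMU code):

#include <stdint.h>
#include <stdio.h>

struct fc_regs {
    uint32_t sfc;   /* source function code, used by MOVES loads  */
    uint32_t dfc;   /* destination function code, used by stores  */
};

static const char *space_name(uint32_t fc)
{
    switch (fc & 7) {
    case 1: return "user data";
    case 2: return "user program";
    case 5: return "supervisor data";
    case 6: return "supervisor program";
    case 7: return "CPU space";
    default: return "reserved";
    }
}

int main(void)
{
    struct fc_regs fc = { .sfc = 1, .dfc = 5 };
    printf("MOVES load reads %s, MOVES store writes %s\n",
           space_name(fc.sfc), space_name(fc.dfc));
    return 0;
}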
DISAS_INSN(move_to_sr)
{
if (IS_USER(s)) {
@ -4596,6 +4661,35 @@ DISAS_INSN(cinv)
/* Invalidate cache line. Implement as no-op. */
}
#if defined(CONFIG_SOFTMMU)
DISAS_INSN(pflush)
{
TCGv opmode;
if (IS_USER(s)) {
gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
opmode = tcg_const_i32((insn >> 3) & 3);
gen_helper_pflush(cpu_env, AREG(insn, 0), opmode);
tcg_temp_free(opmode);
}
DISAS_INSN(ptest)
{
TCGv is_read;
if (IS_USER(s)) {
gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
is_read = tcg_const_i32((insn >> 5) & 1);
gen_helper_ptest(cpu_env, AREG(insn, 0), is_read);
tcg_temp_free(is_read);
}
#endif
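
The two handlers above pull their variant straight out of the opcode word: pflush uses bits 4:3 as an opmode and ptest uses bit 5 as its is_read flag. A hedged decode sketch follows; the opmode mnemonics are taken from the MC68040 manual and are an assumption, not something this patch defines:

#include <stdint.h>
#include <stdio.h>

/* Sketch: decode the fields the pflush/ptest handlers extract. */
static const char *pflush_variant(uint16_t insn)
{
    switch ((insn >> 3) & 3) {
    case 0: return "PFLUSHN (An)";   /* flush non-global entry for (An) */
    case 1: return "PFLUSH (An)";    /* flush entry for (An) */
    case 2: return "PFLUSHAN";       /* flush all non-global entries */
    default: return "PFLUSHA";       /* flush all entries */
    }
}

int main(void)
{
    uint16_t pflusha = 0xf518;   /* 1111 0101 0001 1000 */
    uint16_t ptestr  = 0xf568;   /* ptest with bit 5 (is_read) set */
    printf("%04x -> %s\n", pflusha, pflush_variant(pflusha));
    printf("%04x -> ptest is_read=%d\n", ptestr, (ptestr >> 5) & 1);
    return 0;
}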
DISAS_INSN(wddata)
{
gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
@ -4844,7 +4938,8 @@ DISAS_INSN(fpu)
case 3: /* fmove out */
cpu_src = gen_fp_ptr(REG(ext, 7));
opsize = ext_opsize(ext, 10);
if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_STORE) == -1) {
if (gen_ea_fp(env, s, insn, opsize, cpu_src,
EA_STORE, IS_USER(s)) == -1) {
gen_addr_fault(s);
}
gen_helper_ftst(cpu_env, cpu_src);
@ -4866,7 +4961,8 @@ DISAS_INSN(fpu)
/* Source effective address. */
opsize = ext_opsize(ext, 10);
cpu_src = gen_fp_result_ptr();
if (gen_ea_fp(env, s, insn, opsize, cpu_src, EA_LOADS) == -1) {
if (gen_ea_fp(env, s, insn, opsize, cpu_src,
EA_LOADS, IS_USER(s)) == -1) {
gen_addr_fault(s);
return;
}
@ -5265,7 +5361,7 @@ DISAS_INSN(mac)
tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
/* Load the value now to ensure correct exception behavior.
Perform writeback after reading the MAC inputs. */
loadval = gen_load(s, OS_LONG, addr, 0);
loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));
acc ^= 1;
rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
@ -5601,6 +5697,9 @@ void register_m68k_insns (CPUM68KState *env)
BASE(bitop_im, 08c0, ffc0);
INSN(arith_im, 0a80, fff8, CF_ISA_A);
INSN(arith_im, 0a00, ff00, M68000);
#if defined(CONFIG_SOFTMMU)
INSN(moves, 0e00, ff00, M68000);
#endif
INSN(cas, 0ac0, ffc0, CAS);
INSN(cas, 0cc0, ffc0, CAS);
INSN(cas, 0ec0, ffc0, CAS);
@ -5784,6 +5883,8 @@ void register_m68k_insns (CPUM68KState *env)
INSN(cpushl, f428, ff38, CF_ISA_A);
INSN(cpush, f420, ff20, M68040);
INSN(cinv, f400, ff20, M68040);
INSN(pflush, f500, ffe0, M68040);
INSN(ptest, f548, ffd8, M68040);
INSN(wddata, fb00, ff00, CF_ISA_A);
INSN(wdebug, fbc0, ffc0, CF_ISA_A);
#endif
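
The INSN() registrations pair a 16-bit pattern with a mask: an opcode word selects a handler when (insn & mask) == pattern, so clear mask bits are "don't care". For example the new pflush entry (f500, ffe0) leaves bits 4:0 free for the opmode and address register. A standalone sketch of that matching rule (the table and lookup are illustrative, not the QEMU dispatcher):

#include <stdint.h>
#include <stdio.h>

struct pattern { uint16_t opcode, mask; const char *name; };

/* Entries mirror the new registrations above. */
static const struct pattern table[] = {
    { 0x0e00, 0xff00, "moves"  },
    { 0xf500, 0xffe0, "pflush" },
    { 0xf548, 0xffd8, "ptest"  },
};

static const char *lookup(uint16_t insn)
{
    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if ((insn & table[i].mask) == table[i].opcode) {
            return table[i].name;
        }
    }
    return "unknown";
}

int main(void)
{
    printf("%s\n", lookup(0xf518));   /* a PFLUSHA form: matches pflush */
    printf("%s\n", lookup(0x0e50));   /* a moves encoding */
    return 0;
}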
@ -5822,7 +5923,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
dc->cc_op = CC_OP_DYNAMIC;
dc->cc_op_synced = 1;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->user = (env->sr & SR_S) == 0;
dc->done_mac = 0;
dc->writeback_mask = 0;
num_insns = 0;
@ -5981,6 +6081,14 @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
cpu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
cpu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
cpu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
cpu_fprintf(f, "MMUSR %08x, fault at %08x\n",
env->mmu.mmusr, env->mmu.ar);
#endif
}

View File

@ -367,7 +367,7 @@ static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
return MMU_KERNEL_IDX;
}
int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
#include "exec/cpu-all.h"

View File

@ -38,7 +38,7 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->regs[14] = env->sregs[SR_PC];
}
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
cs->exception_index = 0xaa;
@ -48,7 +48,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
#else /* !CONFIG_USER_ONLY */
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);

View File

@ -33,12 +33,12 @@
* NULL, it means that the function was called in C code (i.e. not
* from generated code or from helper.c)
*/
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = mb_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = mb_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);
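
Here and in the targets that follow, tlb_fill() simply grows a size argument and forwards it to the target's handle_mmu_fault routine; callers with no particular access size (probe_write, the instruction-fetch lookup) pass 0. A minimal sketch of that forwarding shape, with hypothetical names standing in for the per-target pieces:

#include <stdio.h>

/* Hypothetical stand-in for a target's MMU fault handler; a real one
 * would walk its page tables and return non-zero on a fault. */
static int demo_handle_mmu_fault(unsigned long addr, int size,
                                 int access_type, int mmu_idx)
{
    printf("addr=0x%lx size=%d type=%d idx=%d\n",
           addr, size, access_type, mmu_idx);
    return 0;
}

/* Shape of the interface change: tlb_fill gains 'size' and passes it
 * straight through; on failure the real code unwinds the CPU loop. */
static void demo_tlb_fill(unsigned long addr, int size,
                          int access_type, int mmu_idx)
{
    if (demo_handle_mmu_fault(addr, size, access_type, mmu_idx)) {
        /* cpu_loop_exit_restore() in the real code */
    }
}

int main(void)
{
    demo_tlb_fill(0x1000, 4, 0, 0);   /* a 4-byte access */
    demo_tlb_fill(0x2000, 0, 1, 0);   /* size 0: no specific size */
    return 0;
}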

View File

@ -535,7 +535,7 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
#endif
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
MIPSCPU *cpu = MIPS_CPU(cs);

View File

@ -202,7 +202,7 @@ void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);
/* helper.c */
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
/* op_helper.c */

View File

@ -2451,12 +2451,12 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
do_raise_exception_err(env, excp, error_code, retaddr);
}
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = mips_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = mips_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
@ -4190,10 +4190,10 @@ static inline void ensure_writable_pages(CPUMIPSState *env,
target_ulong page_addr;
if (unlikely(MSA_PAGESPAN(addr))) {
/* first page */
probe_write(env, addr, mmu_idx, retaddr);
probe_write(env, addr, 0, mmu_idx, retaddr);
/* second page */
page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
probe_write(env, page_addr, mmu_idx, retaddr);
probe_write(env, page_addr, 0, mmu_idx, retaddr);
}
#endif
}
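
The MSA path above probes both pages before a vector store that straddles a page boundary, so the store either faults up front or completes in full; with the new signature each probe passes size 0 since the check is per page. A standalone sketch of the span test and the two probe addresses (the page size, vector width, and helper name are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define MSA_BYTES  16u   /* an MSA vector store is 16 bytes wide */

/* True when [addr, addr + MSA_BYTES) crosses a page boundary. */
static int spans_pages(uint32_t addr)
{
    return (addr & PAGE_MASK) != ((addr + MSA_BYTES - 1) & PAGE_MASK);
}

int main(void)
{
    uint32_t addr = 0x1ff8;   /* last 8 bytes of one page */
    if (spans_pages(addr)) {
        uint32_t second = (addr & PAGE_MASK) + PAGE_SIZE;
        /* the real code calls probe_write() on both addresses first */
        printf("probe 0x%x and 0x%x before storing\n",
               (unsigned)addr, (unsigned)second);
    }
    return 0;
}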

View File

@ -142,7 +142,7 @@ static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
*flags = 0;
}
int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
#endif /* MOXIE_CPU_H */

View File

@ -29,12 +29,12 @@
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = moxie_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = moxie_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
cpu_loop_exit_restore(cs, retaddr);
}
@ -94,7 +94,7 @@ void moxie_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
MoxieCPU *cpu = MOXIE_CPU(cs);
@ -107,7 +107,7 @@ int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
#else /* !CONFIG_USER_ONLY */
int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
MoxieCPU *cpu = MOXIE_CPU(cs);

View File

@ -252,7 +252,7 @@ static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
MMU_SUPERVISOR_IDX;
}
int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address,
int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address, int size,
int rw, int mmu_idx);
static inline int cpu_interrupts_enabled(CPUNios2State *env)

View File

@ -37,7 +37,8 @@ void nios2_cpu_do_interrupt(CPUState *cs)
env->regs[R_EA] = env->regs[R_PC] + 4;
}
int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx)
int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
cs->exception_index = 0xaa;
/* Page 0x1000 is kuser helper */
@ -232,7 +233,8 @@ static int cpu_nios2_handle_virtual_page(
return 1;
}
int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx)
int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
CPUNios2State *env = &cpu->env;

View File

@ -35,12 +35,12 @@
#define MMU_LOG(x)
#endif
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = nios2_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = nios2_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);

View File

@ -356,7 +356,7 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);

View File

@ -178,8 +178,8 @@ static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
}
#ifndef CONFIG_USER_ONLY
int openrisc_cpu_handle_mmu_fault(CPUState *cs,
vaddr address, int rw, int mmu_idx)
int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int ret = 0;
@ -202,8 +202,8 @@ int openrisc_cpu_handle_mmu_fault(CPUState *cs,
return ret;
}
#else
int openrisc_cpu_handle_mmu_fault(CPUState *cs,
vaddr address, int rw, int mmu_idx)
int openrisc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int ret = 0;

View File

@ -25,12 +25,12 @@
#ifndef CONFIG_USER_ONLY
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = openrisc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = openrisc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
/* Raise Exception. */

View File

@ -1308,7 +1308,7 @@ void ppc_translate_init(void);
int cpu_ppc_signal_handler (int host_signum, void *pinfo,
void *puc);
#if defined(CONFIG_USER_ONLY)
int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
#endif

View File

@ -2925,8 +2925,8 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);

View File

@ -21,7 +21,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);

View File

@ -55,7 +55,7 @@ void s390_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
S390CPU *cpu = S390_CPU(cs);
@ -83,7 +83,7 @@ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
}
}
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size,
int rw, int mmu_idx)
{
S390CPU *cpu = S390_CPU(cs);

View File

@ -323,7 +323,7 @@ ObjectClass *s390_cpu_class_by_name(const char *name);
void s390x_cpu_debug_excp_handler(CPUState *cs);
void s390_cpu_do_interrupt(CPUState *cpu);
bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,

View File

@ -39,10 +39,10 @@
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret != 0)) {
cpu_loop_exit_restore(cs, retaddr);
}
@ -1440,7 +1440,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
/* Sanity check writability of the store address. */
#ifndef CONFIG_USER_ONLY
probe_write(env, a2, mem_idx, ra);
probe_write(env, a2, 0, mem_idx, ra);
#endif
/* Note that the compare-and-swap is atomic, and the store is atomic, but

View File

@ -246,7 +246,7 @@ void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
void sh4_translate_init(void);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
void *puc);
int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf);

View File

@ -34,7 +34,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
@ -458,7 +458,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
return get_mmu_address(env, physical, prot, address, rw, access_type);
}
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SuperHCPU *cpu = SUPERH_CPU(cs);

View File

@ -40,12 +40,12 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
cpu_loop_exit_restore(cs, retaddr);
}
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = superh_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = superh_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);

View File

@ -582,7 +582,7 @@ void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN;
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf);
/* mmu_helper.c */
int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env);

View File

@ -1929,12 +1929,12 @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = sparc_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret) {
cpu_loop_exit_restore(cs, retaddr);
}

View File

@ -27,7 +27,7 @@
#if defined(CONFIG_USER_ONLY)
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SPARCCPU *cpu = SPARC_CPU(cs);
@ -208,7 +208,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SPARCCPU *cpu = SPARC_CPU(cs);
@ -713,7 +713,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int mmu_idx)
{
SPARCCPU *cpu = SPARC_CPU(cs);

View File

@ -112,8 +112,8 @@ static void tilegx_cpu_do_interrupt(CPUState *cs)
cs->exception_index = -1;
}
static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
TileGXCPU *cpu = TILEGX_CPU(cs);

View File

@ -2806,8 +2806,8 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
cpu_loop_exit_restore(cs, pc);
}
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = cpu_tricore_handle_mmu_fault(cs, addr, access_type, mmu_idx);

View File

@ -181,7 +181,7 @@ static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc
}
}
int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
int mmu_idx);
void uc32_translate_init(void);
void switch_mode(CPUUniCore32State *, int);

View File

@ -230,7 +230,7 @@ void uc32_cpu_do_interrupt(CPUState *cs)
cpu_abort(cs, "NO interrupt in user mode\n");
}
int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int access_type, int mmu_idx)
{
cpu_abort(cs, "NO mmu fault in user mode\n");

View File

@ -244,12 +244,12 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
}
#ifndef CONFIG_USER_ONLY
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = uc32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
ret = uc32_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (unlikely(ret)) {
/* now we have a real cpu fault */
cpu_loop_exit_restore(cs, retaddr);

View File

@ -215,7 +215,7 @@ do_fault:
return code;
}
int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int access_type, int mmu_idx)
{
UniCore32CPU *cpu = UNICORE32_CPU(cs);

View File

@ -50,8 +50,8 @@ void xtensa_cpu_do_unaligned_access(CPUState *cs,
}
}
void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;